diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b74f0a4972bc5650aeb989a18d6d2bc73bda2466 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "c3098c4e-bffb-43e0-afd2-573b223c5364", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/training_log_c3098c4e-bffb-43e0-afd2-573b223c5364.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/training_log_c3098c4e-bffb-43e0-afd2-573b223c5364.txt new file mode 100644 index 0000000000000000000000000000000000000000..95296b451fdf1eb7012c70d21206435c31399a6d --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/training_log_c3098c4e-bffb-43e0-afd2-573b223c5364.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:17:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:17:31 2025 --- +[2025-07-05 08:17:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:17:31 2025 --- +[2025-07-05 08:17:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:17:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:17:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:17:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:17:31] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:17:31] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:17:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42 +[2025-07-05 08:17:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42 +[2025-07-05 08:17:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from 
torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
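+# With the CLI values recorded in config.json above (optimizer_mode=0,
+# model_parameterization="qkvo", adam_lr=0.0001, seed=42), this naming scheme yields
+# logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42 -- the directory holding this log.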
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
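+# The accumulators train_loss_sum (above) and train_step_count (below) track the running
+# training loss between validation points: each step adds loss_train / train_seq_len and
+# increments the count; the validation block averages them, all-reduces across ranks, and resets both.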
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:17:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
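+# Illustrative sketch (added for exposition, not part of the original logged run):
+# the .bin shards consumed by _load_data_shard above consist of a 256-int32
+# header (magic 20240520, format version 1, token count) followed by uint16
+# token ids. A compatible shard could be written roughly as below; the helper
+# name is hypothetical and nothing in this script calls it.
+def _write_data_shard_sketch(path, token_ids):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520                       # magic number checked on load
+    header[1] = 1                              # format version checked on load
+    header[2] = len(token_ids)                 # number of tokens in this shard
+    with open(path, "wb") as f:
+        f.write(header.tobytes())              # 256 * 4 bytes, matches f.seek(256 * 4) in the loader
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())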
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
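+# Worked check of the stable-then-decay schedule defined by get_lr() above
+# (added for exposition; not part of the original logged code). With
+# num_iterations=10000 and cooldown_frac=0.8, the base multiplier stays at 1.0
+# for roughly the first 2000 steps, then decays linearly to 0.1:
+#   get_lr(0)     -> 1.0
+#   get_lr(6000)  -> ~0.55   (w = 0.5, so 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) -> 0.1     (end of cooldown)
+# In the training loop below, each optimizer group's lr is set to
+# group["initial_lr"] * get_lr(step), so these values scale every group.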
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:17:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:17:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:17:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:17:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:17:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:17:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:17:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:17:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:17:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:17:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:17:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:17:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:17:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:17:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:17:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:17:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:17:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:17:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:17:35] [Rank 0] PRINT: Model returns: +[2025-07-05 08:17:35] [Rank 0] PRINT: Model returns: +[2025-07-05 08:17:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:17:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:17:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:17:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:17:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:17:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:17:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:17:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:17:35] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:17:35] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:17:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:17:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:17:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:17:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:17:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:17:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:18:42] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:18:42] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:18:42] [Rank 0] PRINT: Starting training... +[2025-07-05 08:18:42] [Rank 0] PRINT: Starting training... +[2025-07-05 08:18:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:18:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
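+# Note on the warning above (editorial gloss, not a logged line): val_batch_size
+# = world_size * val_seq_len = 4 * 65536 = 262144, which implies world_size = 4
+# for this run. Since 1966080 / 262144 = 7.5, only 7 full validation batches
+# (1835008 tokens) are evaluated each time and the remaining 131072 tokens are
+# skipped, as the warning indicates.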
+[2025-07-05 08:18:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:18:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:18:52] [Rank 0] step:21/10000 train_time:1547ms step_avg:73.67ms +[2025-07-05 08:18:52] [Rank 0] step:21/10000 train_time:1547ms step_avg:73.67ms +[2025-07-05 08:18:53] [Rank 0] step:41/10000 train_time:2999ms step_avg:73.14ms +[2025-07-05 08:18:53] [Rank 0] step:41/10000 train_time:2999ms step_avg:73.14ms +[2025-07-05 08:18:55] [Rank 0] step:61/10000 train_time:4450ms step_avg:72.96ms +[2025-07-05 08:18:55] [Rank 0] step:61/10000 train_time:4450ms step_avg:72.96ms +[2025-07-05 08:18:56] [Rank 0] step:81/10000 train_time:5905ms step_avg:72.90ms +[2025-07-05 08:18:56] [Rank 0] step:81/10000 train_time:5905ms step_avg:72.90ms +[2025-07-05 08:18:58] [Rank 0] step:101/10000 train_time:7740ms step_avg:76.63ms +[2025-07-05 08:18:58] [Rank 0] step:101/10000 train_time:7740ms step_avg:76.63ms +[2025-07-05 08:18:59] [Rank 0] step:121/10000 train_time:9193ms step_avg:75.98ms +[2025-07-05 08:18:59] [Rank 0] step:121/10000 train_time:9193ms step_avg:75.98ms +[2025-07-05 08:19:01] [Rank 0] step:141/10000 train_time:10648ms step_avg:75.52ms +[2025-07-05 08:19:01] [Rank 0] step:141/10000 train_time:10648ms step_avg:75.52ms +[2025-07-05 08:19:02] [Rank 0] step:161/10000 train_time:12103ms step_avg:75.17ms +[2025-07-05 08:19:02] [Rank 0] step:161/10000 train_time:12103ms step_avg:75.17ms +[2025-07-05 08:19:04] [Rank 0] step:181/10000 train_time:13606ms step_avg:75.17ms +[2025-07-05 08:19:04] [Rank 0] step:181/10000 train_time:13606ms step_avg:75.17ms +[2025-07-05 08:19:05] [Rank 0] step:201/10000 train_time:15050ms step_avg:74.87ms +[2025-07-05 08:19:05] [Rank 0] step:201/10000 train_time:15050ms step_avg:74.87ms +[2025-07-05 08:19:07] [Rank 0] step:221/10000 train_time:16505ms step_avg:74.68ms +[2025-07-05 08:19:07] [Rank 0] step:221/10000 train_time:16505ms step_avg:74.68ms +[2025-07-05 08:19:08] [Rank 0] step:241/10000 train_time:17962ms step_avg:74.53ms +[2025-07-05 08:19:08] [Rank 0] step:241/10000 train_time:17962ms step_avg:74.53ms +[2025-07-05 08:19:10] [Rank 0] step:261/10000 train_time:19414ms step_avg:74.38ms +[2025-07-05 08:19:10] [Rank 0] step:261/10000 train_time:19414ms step_avg:74.38ms +[2025-07-05 08:19:11] [Rank 0] step:281/10000 train_time:20905ms step_avg:74.40ms +[2025-07-05 08:19:11] [Rank 0] step:281/10000 train_time:20905ms step_avg:74.40ms +[2025-07-05 08:19:12] [Rank 0] step:301/10000 train_time:22359ms step_avg:74.28ms +[2025-07-05 08:19:12] [Rank 0] step:301/10000 train_time:22359ms step_avg:74.28ms +[2025-07-05 08:19:14] [Rank 0] step:321/10000 train_time:23814ms step_avg:74.19ms +[2025-07-05 08:19:14] [Rank 0] step:321/10000 train_time:23814ms step_avg:74.19ms +[2025-07-05 08:19:16] [Rank 0] step:341/10000 train_time:25482ms step_avg:74.73ms +[2025-07-05 08:19:16] [Rank 0] step:341/10000 train_time:25482ms step_avg:74.73ms +[2025-07-05 08:19:18] [Rank 0] step:361/10000 train_time:27258ms step_avg:75.51ms +[2025-07-05 08:19:18] [Rank 0] step:361/10000 train_time:27258ms step_avg:75.51ms +[2025-07-05 08:19:19] [Rank 0] step:381/10000 train_time:29101ms step_avg:76.38ms +[2025-07-05 08:19:19] [Rank 0] step:381/10000 train_time:29101ms step_avg:76.38ms +[2025-07-05 08:19:21] [Rank 0] step:401/10000 train_time:30556ms step_avg:76.20ms +[2025-07-05 08:19:21] [Rank 0] step:401/10000 train_time:30556ms step_avg:76.20ms +[2025-07-05 08:19:22] [Rank 0] step:421/10000 train_time:32013ms step_avg:76.04ms 
+[2025-07-05 08:19:22] [Rank 0] step:421/10000 train_time:32013ms step_avg:76.04ms +[2025-07-05 08:19:24] [Rank 0] step:441/10000 train_time:33467ms step_avg:75.89ms +[2025-07-05 08:19:24] [Rank 0] step:441/10000 train_time:33467ms step_avg:75.89ms +[2025-07-05 08:19:25] [Rank 0] step:461/10000 train_time:35162ms step_avg:76.27ms +[2025-07-05 08:19:25] [Rank 0] step:461/10000 train_time:35162ms step_avg:76.27ms +[2025-07-05 08:19:27] [Rank 0] step:481/10000 train_time:36617ms step_avg:76.13ms +[2025-07-05 08:19:27] [Rank 0] step:481/10000 train_time:36617ms step_avg:76.13ms +[2025-07-05 08:19:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:19:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:19:29] [Rank 0] PRINT: step:500/10000 train_loss:9.6493 val_loss:8.5918 train_time:38072ms step_avg:76.14ms +[2025-07-05 08:19:29] [Rank 0] PRINT: step:500/10000 train_loss:9.6493 val_loss:8.5918 train_time:38072ms step_avg:76.14ms +[2025-07-05 08:19:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:19:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..cd08cee0634b1a0c9270080cade1d47ff170c6fb --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "953dca4d-18ea-4b41-8a6a-dca8f6563012", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/training_log_953dca4d-18ea-4b41-8a6a-dca8f6563012.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/training_log_953dca4d-18ea-4b41-8a6a-dca8f6563012.txt new file mode 100644 index 0000000000000000000000000000000000000000..e2d5ec47c38570f5dc59be754ceff40170dafacf --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/training_log_953dca4d-18ea-4b41-8a6a-dca8f6563012.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:44:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:44:45 2025 --- +[2025-07-05 08:44:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:44:45 2025 --- +[2025-07-05 08:44:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:44:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:44:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:44:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:44:45] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 
08:44:45] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:44:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43 +[2025-07-05 08:44:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43 +[2025-07-05 08:44:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
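+# Readability note (added; not part of the original logged run): train_loss_sum and
+# train_step_count accumulate the sequence-length-normalised training loss between
+# validation points; they are averaged across ranks and reset inside the validation
+# block below. With the logged defaults (num_iterations=10000, cooldown_frac=0.8),
+# the get_lr() multiplier defined above stays at 1.0 for the first 2000 steps and
+# then decays linearly to 0.1 at step 10000, e.g. get_lr(6000) == 0.55.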
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:44:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
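+# (Added note) The run directory name encodes the sweep configuration
+# (optimizer mode, parameterization, Adam LR, seed), e.g.
+# logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43 for this run,
+# matching the config.json paths logged above.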
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
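+        # Hypothetical worked example (not drawn from the dataset): for a cleaned item such as
+        #   "What is the birth date of Alice Smith? 15 June 1987"
+        # the prompt becomes "What is the birth date of Alice Smith?" and the answer
+        # "15 June 1987"; the expected first token is then taken from the encoding of
+        # " 15 June 1987" (note the leading space added below), so the accuracy check
+        # asks whether the model predicts that token immediately after the "?".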
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
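+# Worked examples of the two schedules defined above (illustrative only; the underscore
+# names are hypothetical and unused elsewhere). With num_iterations=10000 and
+# cooldown_frac=0.8, the LR multiplier stays at 1.0 for the first ~20% of steps and then
+# decays linearly to 0.1, while the attention window grows from 128 tokens (1 block) at
+# step 0 to 1792 tokens (14 blocks) at the final step:
+#   get_lr at steps (0, 2000, 6000, 10000) -> (1.0, 1.0, 0.55, 0.1)
+_lr_multiplier_examples = {s: get_lr(s) for s in (0, 2000, 6000, 10000)}
+_window_block_examples = {s: int(get_window_size_blocks(s)) for s in (0, 10000)}  # {0: 1, 10000: 14}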
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:44:45] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:44:45] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:44:45] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:44:45] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:44:47] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:44:47] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:44:47] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:44:47] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:44:47] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:44:47] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:44:48] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:44:48] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:44:48] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:44:48] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:44:48] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:44:48] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:44:48] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:44:48] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:44:48] [Rank 0] PRINT: Model returns: +[2025-07-05 08:44:48] [Rank 0] PRINT: Model returns: +[2025-07-05 08:44:48] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:44:48] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:44:48] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:44:48] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:44:48] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:44:48] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:44:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:44:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:44:48] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:44:48] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:44:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:44:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:44:48] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:44:48] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:44:48] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:44:48] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:45:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:45:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:45:53] [Rank 0] PRINT: Starting training... +[2025-07-05 08:45:53] [Rank 0] PRINT: Starting training... +[2025-07-05 08:45:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:45:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:46:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:46:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:46:03] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.26ms +[2025-07-05 08:46:03] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.26ms +[2025-07-05 08:46:04] [Rank 0] step:41/10000 train_time:3201ms step_avg:78.07ms +[2025-07-05 08:46:04] [Rank 0] step:41/10000 train_time:3201ms step_avg:78.07ms +[2025-07-05 08:46:06] [Rank 0] step:61/10000 train_time:4657ms step_avg:76.34ms +[2025-07-05 08:46:06] [Rank 0] step:61/10000 train_time:4657ms step_avg:76.34ms +[2025-07-05 08:46:07] [Rank 0] step:81/10000 train_time:6110ms step_avg:75.44ms +[2025-07-05 08:46:07] [Rank 0] step:81/10000 train_time:6110ms step_avg:75.44ms +[2025-07-05 08:46:09] [Rank 0] step:101/10000 train_time:8239ms step_avg:81.58ms +[2025-07-05 08:46:09] [Rank 0] step:101/10000 train_time:8239ms step_avg:81.58ms +[2025-07-05 08:46:11] [Rank 0] step:121/10000 train_time:9692ms step_avg:80.10ms +[2025-07-05 08:46:11] [Rank 0] step:121/10000 train_time:9692ms step_avg:80.10ms +[2025-07-05 08:46:12] [Rank 0] step:141/10000 train_time:11148ms step_avg:79.06ms +[2025-07-05 08:46:12] [Rank 0] step:141/10000 train_time:11148ms step_avg:79.06ms +[2025-07-05 08:46:14] [Rank 0] step:161/10000 train_time:12604ms step_avg:78.29ms +[2025-07-05 08:46:14] [Rank 0] step:161/10000 train_time:12604ms step_avg:78.29ms +[2025-07-05 08:46:16] [Rank 0] step:181/10000 train_time:14730ms step_avg:81.38ms +[2025-07-05 08:46:16] [Rank 0] step:181/10000 train_time:14730ms step_avg:81.38ms +[2025-07-05 08:46:17] [Rank 0] step:201/10000 train_time:16266ms step_avg:80.93ms +[2025-07-05 08:46:17] [Rank 0] step:201/10000 train_time:16266ms step_avg:80.93ms +[2025-07-05 08:46:19] [Rank 0] step:221/10000 train_time:17722ms step_avg:80.19ms +[2025-07-05 08:46:19] [Rank 0] step:221/10000 train_time:17722ms step_avg:80.19ms +[2025-07-05 08:46:20] [Rank 0] step:241/10000 train_time:19180ms step_avg:79.58ms +[2025-07-05 08:46:20] [Rank 0] step:241/10000 train_time:19180ms step_avg:79.58ms +[2025-07-05 08:46:22] [Rank 0] step:261/10000 train_time:20634ms step_avg:79.06ms +[2025-07-05 08:46:22] [Rank 0] step:261/10000 train_time:20634ms step_avg:79.06ms +[2025-07-05 08:46:23] [Rank 0] step:281/10000 train_time:22328ms step_avg:79.46ms +[2025-07-05 08:46:23] [Rank 0] step:281/10000 train_time:22328ms step_avg:79.46ms +[2025-07-05 08:46:25] [Rank 0] step:301/10000 train_time:23783ms step_avg:79.01ms +[2025-07-05 08:46:25] [Rank 0] step:301/10000 train_time:23783ms step_avg:79.01ms +[2025-07-05 08:46:26] [Rank 0] step:321/10000 train_time:25240ms step_avg:78.63ms +[2025-07-05 08:46:26] [Rank 0] step:321/10000 train_time:25240ms step_avg:78.63ms +[2025-07-05 08:46:28] [Rank 0] step:341/10000 train_time:26697ms step_avg:78.29ms +[2025-07-05 08:46:28] [Rank 0] step:341/10000 train_time:26697ms step_avg:78.29ms +[2025-07-05 08:46:30] [Rank 0] step:361/10000 train_time:28810ms step_avg:79.81ms +[2025-07-05 08:46:30] [Rank 0] step:361/10000 train_time:28810ms step_avg:79.81ms +[2025-07-05 08:46:31] [Rank 0] step:381/10000 train_time:30249ms step_avg:79.39ms +[2025-07-05 08:46:31] [Rank 0] step:381/10000 train_time:30249ms step_avg:79.39ms +[2025-07-05 08:46:33] [Rank 0] step:401/10000 train_time:31805ms step_avg:79.31ms +[2025-07-05 08:46:33] [Rank 0] step:401/10000 train_time:31805ms step_avg:79.31ms +[2025-07-05 08:46:34] [Rank 0] step:421/10000 train_time:33263ms step_avg:79.01ms 
+[2025-07-05 08:46:34] [Rank 0] step:421/10000 train_time:33263ms step_avg:79.01ms +[2025-07-05 08:46:36] [Rank 0] step:441/10000 train_time:34720ms step_avg:78.73ms +[2025-07-05 08:46:36] [Rank 0] step:441/10000 train_time:34720ms step_avg:78.73ms +[2025-07-05 08:46:38] [Rank 0] step:461/10000 train_time:36843ms step_avg:79.92ms +[2025-07-05 08:46:38] [Rank 0] step:461/10000 train_time:36843ms step_avg:79.92ms +[2025-07-05 08:46:39] [Rank 0] step:481/10000 train_time:38436ms step_avg:79.91ms +[2025-07-05 08:46:39] [Rank 0] step:481/10000 train_time:38436ms step_avg:79.91ms +[2025-07-05 08:46:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:46:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:46:42] [Rank 0] PRINT: step:500/10000 train_loss:9.6492 val_loss:8.5917 train_time:40018ms step_avg:80.04ms +[2025-07-05 08:46:42] [Rank 0] PRINT: step:500/10000 train_loss:9.6492 val_loss:8.5917 train_time:40018ms step_avg:80.04ms +[2025-07-05 08:46:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:46:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6aa3f97d922dc5dff42711345ec251642737bd64 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e8c5c877-f888-4045-9afa-947ed49725a3", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44/training_log_e8c5c877-f888-4045-9afa-947ed49725a3.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44/training_log_e8c5c877-f888-4045-9afa-947ed49725a3.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b16f7c053bae1c4f241c6bbe3adc7c3ec8b53c2 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44/training_log_e8c5c877-f888-4045-9afa-947ed49725a3.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:12:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:12:21 2025 --- +[2025-07-05 09:12:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:12:21 2025 --- +[2025-07-05 09:12:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:12:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:12:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:12:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:12:21] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 
09:12:21] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:12:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44 +[2025-07-05 09:12:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44 +[2025-07-05 09:12:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
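+# Note: a minimal worked example of the get_lr() schedule defined above, using only the
+# Hyperparameters in this file (num_iterations=10000, cooldown_frac=0.8); the multipliers
+# actually applied come from the training loop below, which scales each optimizer group
+# relative to its "initial_lr".
+#   x = step / 10000; while x < 0.2 the multiplier is 1.0 (stable phase), afterwards
+#   w = (1 - x) / 0.8 and the multiplier is w * 1.0 + (1 - w) * 0.1:
+#     step 2000 -> 1.00, step 6000 -> 0.55, step 10000 -> 0.10.
+# Each training step also draws world_size * train_seq_len (= world_size * 12288) tokens
+# from distributed_data_generator, split evenly across ranks.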
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:12:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
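+# For reference: with the CLI args parsed in this run (optimizer_mode=0,
+# model_parameterization='qkvo', adam_lr=0.0001, seed=44) this default resolves to
+# /home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44.
+# On the master process it is then rebuilt under base_log_dir below as
+# logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_44, the run directory reported at
+# the top of this log, before config.json and the training log file are written there.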
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
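+# --- Editor's sketch (not part of the original run): concrete values of the two
+# schedules defined above at the logged settings (num_iterations = 10000,
+# cooldown_frac = 0.8). The LR multiplier stays flat for the first 2000 steps and
+# then decays linearly to 0.1, while the attention window widens on the same
+# progress axis (kept commented out so the logged script is unchanged):
+#
+#   for s in (0, 2000, 6000, 10000):
+#       print(s, get_lr(s), int(get_window_size_blocks(s).item()) * 128)
+#   # step 0     -> lr x1.0,  window  128 tokens
+#   # step 2000  -> lr x1.0,  window  384 tokens   (cooldown begins after this point)
+#   # step 6000  -> lr x0.55, window 1152 tokens
+#   # step 10000 -> lr x0.1,  window 1792 tokens
+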
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:12:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:12:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:12:22] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:12:22] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:12:24] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:12:24] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:12:24] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:12:24] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:12:24] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:12:24] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:12:25] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:12:25] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:12:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:12:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:12:25] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:12:25] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:12:25] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:12:25] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:12:25] [Rank 0] PRINT: Model returns: +[2025-07-05 09:12:25] [Rank 0] PRINT: Model returns: +[2025-07-05 09:12:25] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:12:25] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:12:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:12:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:12:25] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:12:25] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:12:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:12:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:12:25] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:12:25] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:12:25] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:12:25] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:12:25] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:12:25] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:12:25] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:12:25] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:13:31] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:13:31] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:13:31] [Rank 0] PRINT: Starting training... +[2025-07-05 09:13:31] [Rank 0] PRINT: Starting training... +[2025-07-05 09:13:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:13:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:13:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:13:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:13:40] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.62ms +[2025-07-05 09:13:40] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.62ms +[2025-07-05 09:13:41] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.16ms +[2025-07-05 09:13:41] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.16ms +[2025-07-05 09:13:43] [Rank 0] step:61/10000 train_time:4452ms step_avg:72.99ms +[2025-07-05 09:13:43] [Rank 0] step:61/10000 train_time:4452ms step_avg:72.99ms +[2025-07-05 09:13:44] [Rank 0] step:81/10000 train_time:5907ms step_avg:72.93ms +[2025-07-05 09:13:44] [Rank 0] step:81/10000 train_time:5907ms step_avg:72.93ms +[2025-07-05 09:13:46] [Rank 0] step:101/10000 train_time:7402ms step_avg:73.29ms +[2025-07-05 09:13:46] [Rank 0] step:101/10000 train_time:7402ms step_avg:73.29ms +[2025-07-05 09:13:47] [Rank 0] step:121/10000 train_time:8854ms step_avg:73.18ms +[2025-07-05 09:13:47] [Rank 0] step:121/10000 train_time:8854ms step_avg:73.18ms +[2025-07-05 09:13:49] [Rank 0] step:141/10000 train_time:10310ms step_avg:73.12ms +[2025-07-05 09:13:49] [Rank 0] step:141/10000 train_time:10310ms step_avg:73.12ms +[2025-07-05 09:13:50] [Rank 0] step:161/10000 train_time:11767ms step_avg:73.09ms +[2025-07-05 09:13:50] [Rank 0] step:161/10000 train_time:11767ms step_avg:73.09ms +[2025-07-05 09:13:52] [Rank 0] step:181/10000 train_time:13271ms step_avg:73.32ms +[2025-07-05 09:13:52] [Rank 0] step:181/10000 train_time:13271ms step_avg:73.32ms +[2025-07-05 09:13:53] [Rank 0] step:201/10000 train_time:14909ms step_avg:74.18ms +[2025-07-05 09:13:53] [Rank 0] step:201/10000 train_time:14909ms step_avg:74.18ms +[2025-07-05 09:13:55] [Rank 0] step:221/10000 train_time:16364ms step_avg:74.05ms +[2025-07-05 09:13:55] [Rank 0] step:221/10000 train_time:16364ms step_avg:74.05ms +[2025-07-05 09:13:56] [Rank 0] step:241/10000 train_time:17821ms step_avg:73.95ms +[2025-07-05 09:13:56] [Rank 0] step:241/10000 train_time:17821ms step_avg:73.95ms +[2025-07-05 09:13:58] [Rank 0] step:261/10000 train_time:19274ms step_avg:73.85ms +[2025-07-05 09:13:58] [Rank 0] step:261/10000 train_time:19274ms step_avg:73.85ms +[2025-07-05 09:13:59] [Rank 0] step:281/10000 train_time:20966ms step_avg:74.61ms +[2025-07-05 09:13:59] [Rank 0] step:281/10000 train_time:20966ms step_avg:74.61ms +[2025-07-05 09:14:01] [Rank 0] step:301/10000 train_time:22421ms step_avg:74.49ms +[2025-07-05 09:14:01] [Rank 0] step:301/10000 train_time:22421ms step_avg:74.49ms +[2025-07-05 09:14:02] [Rank 0] step:321/10000 train_time:23877ms step_avg:74.38ms +[2025-07-05 09:14:02] [Rank 0] step:321/10000 train_time:23877ms step_avg:74.38ms +[2025-07-05 09:14:04] [Rank 0] step:341/10000 train_time:25497ms step_avg:74.77ms +[2025-07-05 09:14:04] [Rank 0] step:341/10000 train_time:25497ms step_avg:74.77ms +[2025-07-05 09:14:06] [Rank 0] step:361/10000 train_time:27044ms step_avg:74.92ms +[2025-07-05 09:14:06] [Rank 0] step:361/10000 train_time:27044ms step_avg:74.92ms +[2025-07-05 09:14:07] [Rank 0] step:381/10000 train_time:28852ms step_avg:75.73ms +[2025-07-05 09:14:07] [Rank 0] step:381/10000 train_time:28852ms step_avg:75.73ms +[2025-07-05 09:14:09] [Rank 0] step:401/10000 train_time:30312ms step_avg:75.59ms +[2025-07-05 09:14:09] [Rank 0] step:401/10000 train_time:30312ms step_avg:75.59ms +[2025-07-05 09:14:10] [Rank 0] step:421/10000 train_time:31768ms step_avg:75.46ms 
+[2025-07-05 09:14:10] [Rank 0] step:421/10000 train_time:31768ms step_avg:75.46ms +[2025-07-05 09:14:12] [Rank 0] step:441/10000 train_time:33226ms step_avg:75.34ms +[2025-07-05 09:14:12] [Rank 0] step:441/10000 train_time:33226ms step_avg:75.34ms +[2025-07-05 09:14:13] [Rank 0] step:461/10000 train_time:34917ms step_avg:75.74ms +[2025-07-05 09:14:13] [Rank 0] step:461/10000 train_time:34917ms step_avg:75.74ms +[2025-07-05 09:14:15] [Rank 0] step:481/10000 train_time:36373ms step_avg:75.62ms +[2025-07-05 09:14:15] [Rank 0] step:481/10000 train_time:36373ms step_avg:75.62ms +[2025-07-05 09:14:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:14:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:14:17] [Rank 0] PRINT: step:500/10000 train_loss:9.6491 val_loss:8.5913 train_time:37830ms step_avg:75.66ms +[2025-07-05 09:14:17] [Rank 0] PRINT: step:500/10000 train_loss:9.6491 val_loss:8.5913 train_time:37830ms step_avg:75.66ms +[2025-07-05 09:14:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:14:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7cd31743ca9fa5ce9c03670b9e01e952daed5f25 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "5b6a003b-e6ef-4c66-aeb3-916fcde98bfd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/training_log_5b6a003b-e6ef-4c66-aeb3-916fcde98bfd.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/training_log_5b6a003b-e6ef-4c66-aeb3-916fcde98bfd.txt new file mode 100644 index 0000000000000000000000000000000000000000..239a982a8fee95298d1b8c57234b188896a0f5aa --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/training_log_5b6a003b-e6ef-4c66-aeb3-916fcde98bfd.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:39:23] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:39:23 2025 --- +[2025-07-05 09:39:23] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:39:23 2025 --- +[2025-07-05 09:39:23] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:39:23] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:39:23] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:39:23] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:39:23] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 
09:39:23] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:39:23] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45 +[2025-07-05 09:39:23] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45 +[2025-07-05 09:39:23] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:39:23] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:39:23] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:39:23] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:39:25] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:39:25] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:39:25] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:39:26] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 09:39:26] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:39:26] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:39:26] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:39:26] [Rank 0] PRINT: Model returns:
+[2025-07-05 09:39:26] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:39:26] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:39:26] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:39:26] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:39:26] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:39:26] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:39:26] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:39:26] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:40:32] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:40:32] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:40:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:40:40] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:40:41] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.49ms
+[2025-07-05 09:40:43] [Rank 0] step:41/10000 train_time:3205ms step_avg:78.16ms
+[2025-07-05 09:40:44] [Rank 0] step:61/10000 train_time:4660ms step_avg:76.40ms
+[2025-07-05 09:40:46] [Rank 0] step:81/10000 train_time:6111ms step_avg:75.44ms
+[2025-07-05 09:40:48] [Rank 0] step:101/10000 train_time:8224ms step_avg:81.42ms
+[2025-07-05 09:40:49] [Rank 0] step:121/10000 train_time:9678ms step_avg:79.98ms
+[2025-07-05 09:40:51] [Rank 0] step:141/10000 train_time:11133ms step_avg:78.96ms
+[2025-07-05 09:40:52] [Rank 0] step:161/10000 train_time:12587ms step_avg:78.18ms
+[2025-07-05 09:40:54] [Rank 0] step:181/10000 train_time:14298ms step_avg:78.99ms
+[2025-07-05 09:40:55] [Rank 0] step:201/10000 train_time:15733ms step_avg:78.27ms
+[2025-07-05 09:40:57] [Rank 0] step:221/10000 train_time:17188ms step_avg:77.78ms
+[2025-07-05 09:40:58] [Rank 0] step:241/10000 train_time:18643ms step_avg:77.36ms
+[2025-07-05 09:41:00] [Rank 0] step:261/10000 train_time:20098ms step_avg:77.01ms
+[2025-07-05 09:41:01] [Rank 0] step:281/10000 train_time:21794ms step_avg:77.56ms
+[2025-07-05 09:41:03] [Rank 0] step:301/10000 train_time:23248ms step_avg:77.23ms
+[2025-07-05 09:41:04] [Rank 0] step:321/10000 train_time:24704ms step_avg:76.96ms
+[2025-07-05 09:41:06] [Rank 0] step:341/10000 train_time:26161ms step_avg:76.72ms
+[2025-07-05 09:41:08] [Rank 0] step:361/10000 train_time:27870ms step_avg:77.20ms
+[2025-07-05 09:41:09] [Rank 0] step:381/10000 train_time:29717ms step_avg:78.00ms
+[2025-07-05 09:41:11] [Rank 0] step:401/10000 train_time:31173ms step_avg:77.74ms
+[2025-07-05 09:41:12] [Rank 0] step:421/10000 train_time:32632ms step_avg:77.51ms
+[2025-07-05 09:41:12] [Rank 0] step:421/10000 train_time:32632ms step_avg:77.51ms +[2025-07-05 09:41:14] [Rank 0] step:441/10000 train_time:34086ms step_avg:77.29ms +[2025-07-05 09:41:14] [Rank 0] step:441/10000 train_time:34086ms step_avg:77.29ms +[2025-07-05 09:41:15] [Rank 0] step:461/10000 train_time:35781ms step_avg:77.62ms +[2025-07-05 09:41:15] [Rank 0] step:461/10000 train_time:35781ms step_avg:77.62ms +[2025-07-05 09:41:17] [Rank 0] step:481/10000 train_time:37234ms step_avg:77.41ms +[2025-07-05 09:41:17] [Rank 0] step:481/10000 train_time:37234ms step_avg:77.41ms +[2025-07-05 09:41:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:41:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:41:19] [Rank 0] PRINT: step:500/10000 train_loss:9.6495 val_loss:8.5920 train_time:38692ms step_avg:77.38ms +[2025-07-05 09:41:19] [Rank 0] PRINT: step:500/10000 train_loss:9.6495 val_loss:8.5920 train_time:38692ms step_avg:77.38ms +[2025-07-05 09:41:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:41:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0561e79c5249fff48fd5f8337b92a41e6a0a5df5 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "0adf809c-f280-44f0-811f-d8d8e7f4e01a", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46/training_log_0adf809c-f280-44f0-811f-d8d8e7f4e01a.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46/training_log_0adf809c-f280-44f0-811f-d8d8e7f4e01a.txt new file mode 100644 index 0000000000000000000000000000000000000000..30a39d5c2b9e9a816abb89bd79a4eb8a7fb0b13b --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46/training_log_0adf809c-f280-44f0-811f-d8d8e7f4e01a.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:06:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:06:39 2025 --- +[2025-07-05 10:06:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:06:39 2025 --- +[2025-07-05 10:06:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:06:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:06:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:06:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:06:39] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 
10:06:39] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:06:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46 +[2025-07-05 10:06:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_46 +[2025-07-05 10:06:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the run log file (guarded: logfile is only set on the master process)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
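+# Illustration of the get_lr() schedule defined above, using the Hyperparameters
+# defaults (num_iterations=10000, cooldown_frac=0.8). get_lr() returns the multiplier
+# applied to each param group's initial_lr in the training loop; the calls below are
+# shown for reference only and are not executed during training:
+#   get_lr(1000)   # -> 1.0   (x=0.1, constant phase since x < 1 - cooldown_frac)
+#   get_lr(6000)   # -> 0.55  (x=0.6, w=0.5, so 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000)  # -> 0.1   (x=1.0, w=0.0, floor of the linear cooldown)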
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:06:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
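+# Bookkeeping note: train_loss_sum (above) and train_step_count (below) accumulate the
+# detached per-step training loss (scaled by 1/train_seq_len) and the number of steps
+# taken since the last validation pass; both are reset to zero after each validation
+# report, so the train_loss printed there is an average over the preceding interval.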
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:06:40] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:06:40] [Rank 0] PRINT: Constructing model...
+[2025-07-05 10:06:42] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 10:06:42] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 10:06:42] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 10:06:43] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 10:06:43] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 10:06:43] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 10:06:43] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 10:06:43] [Rank 0] PRINT: Model returns: 
+[2025-07-05 10:06:43] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 10:06:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 10:06:43] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 10:06:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 10:06:43] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 10:06:43] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 10:06:43] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 10:06:43] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 10:07:51] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 10:07:51] [Rank 0] PRINT: Starting training...
+[2025-07-05 10:07:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 10:07:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 10:08:01] [Rank 0] step:21/10000 train_time:1719ms step_avg:81.85ms
+[2025-07-05 10:08:02] [Rank 0] step:41/10000 train_time:3169ms step_avg:77.29ms
+[2025-07-05 10:08:03] [Rank 0] step:61/10000 train_time:4621ms step_avg:75.76ms
+[2025-07-05 10:08:05] [Rank 0] step:81/10000 train_time:6074ms step_avg:74.99ms
+[2025-07-05 10:08:06] [Rank 0] step:101/10000 train_time:7563ms step_avg:74.88ms
+[2025-07-05 10:08:08] [Rank 0] step:121/10000 train_time:9019ms step_avg:74.54ms
+[2025-07-05 10:08:09] [Rank 0] step:141/10000 train_time:10471ms step_avg:74.26ms
+[2025-07-05 10:08:11] [Rank 0] step:161/10000 train_time:11925ms step_avg:74.07ms
+[2025-07-05 10:08:12] [Rank 0] step:181/10000 train_time:13379ms step_avg:73.92ms
+[2025-07-05 10:08:14] [Rank 0] step:201/10000 train_time:15067ms step_avg:74.96ms
+[2025-07-05 10:08:15] [Rank 0] step:221/10000 train_time:16522ms step_avg:74.76ms
+[2025-07-05 10:08:17] [Rank 0] step:241/10000 train_time:17974ms step_avg:74.58ms
+[2025-07-05 10:08:18] [Rank 0] step:261/10000 train_time:19429ms step_avg:74.44ms
+[2025-07-05 10:08:20] [Rank 0] step:281/10000 train_time:20920ms step_avg:74.45ms
+[2025-07-05 10:08:21] [Rank 0] step:301/10000 train_time:22375ms step_avg:74.34ms
+[2025-07-05 10:08:23] [Rank 0] step:321/10000 train_time:23832ms step_avg:74.24ms
+[2025-07-05 10:08:24] [Rank 0] step:341/10000 train_time:25288ms step_avg:74.16ms
+[2025-07-05 10:08:26] [Rank 0] step:361/10000 train_time:26742ms step_avg:74.08ms
+[2025-07-05 10:08:27] [Rank 0] step:381/10000 train_time:28434ms step_avg:74.63ms
+[2025-07-05 10:08:29] [Rank 0] step:401/10000 train_time:29891ms step_avg:74.54ms
+[2025-07-05 10:08:30] [Rank 0] step:421/10000 train_time:31347ms step_avg:74.46ms +[2025-07-05 10:08:32] [Rank 0] step:441/10000 train_time:32807ms step_avg:74.39ms +[2025-07-05 10:08:32] [Rank 0] step:441/10000 train_time:32807ms step_avg:74.39ms +[2025-07-05 10:08:33] [Rank 0] step:461/10000 train_time:34500ms step_avg:74.84ms +[2025-07-05 10:08:33] [Rank 0] step:461/10000 train_time:34500ms step_avg:74.84ms +[2025-07-05 10:08:35] [Rank 0] step:481/10000 train_time:35957ms step_avg:74.75ms +[2025-07-05 10:08:35] [Rank 0] step:481/10000 train_time:35957ms step_avg:74.75ms +[2025-07-05 10:08:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:08:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:08:37] [Rank 0] PRINT: step:500/10000 train_loss:9.6496 val_loss:8.5923 train_time:37412ms step_avg:74.82ms +[2025-07-05 10:08:37] [Rank 0] PRINT: step:500/10000 train_loss:9.6496 val_loss:8.5923 train_time:37412ms step_avg:74.82ms +[2025-07-05 10:08:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:08:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..4abc529c461bf61272bd57d8873e4894c38f13de --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "011f792e-df42-4518-9e73-406f1f237160", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47/training_log_011f792e-df42-4518-9e73-406f1f237160.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47/training_log_011f792e-df42-4518-9e73-406f1f237160.txt new file mode 100644 index 0000000000000000000000000000000000000000..afb4eca691196e3b5ca9465f58029703fdac4fef --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47/training_log_011f792e-df42-4518-9e73-406f1f237160.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:26:04] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:26:04 2025 --- +[2025-07-05 08:26:04] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:26:04 2025 --- +[2025-07-05 08:26:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:26:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:26:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:26:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:26:04] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 
08:26:04] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:26:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47 +[2025-07-05 08:26:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_47 +[2025-07-05 08:26:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
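+# A quick sketch (comments only, nothing executed, hypothetical step values): how the schedules
+# defined above behave for this run's Hyperparameters (num_iterations=10000, cooldown_frac=0.8).
+# get_lr returns a multiplier that the training loop applies to each param group's initial_lr:
+#   get_lr(0)     -> 1.0    # x = 0.0 < 1 - cooldown_frac, stable phase
+#   get_lr(2000)  -> 1.0    # x = 0.2, w = 1.0
+#   get_lr(6000)  -> 0.55   # x = 0.6, w = 0.5 -> 0.5*1.0 + 0.5*0.1
+#   get_lr(10000) -> 0.1    # x = 1.0, w = 0.0, end of cooldown
+# Likewise the Muon momentum ramp in the loop uses frac = min(step/300, 1), giving momentum
+# 0.85 at step 0 and 0.95 from step 300 onward.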
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:26:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
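+# A quick sketch of the run-directory naming scheme above (hypothetical CLI values, not this
+# run's actual args): optimizer_mode=3, model_parameterization='qkvo', adam_lr=0.001, seed=7
+# would resolve to .../logs_bios/qa_0704/mode_3_param_qkvo_lr_0.001_seed_7; the same folder
+# name is rebuilt below on the master process for the per-run training log and config.json.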
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
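+# ----------------------------------------------------------------------------
+# [Editor's note: illustrative sketch, not part of the original run script.]
+# The two schedules defined above are easiest to sanity-check in isolation.
+# get_lr holds the multiplier at 1.0 for the first (1 - cooldown_frac) of
+# training, then decays it linearly toward 0.1; with this run's settings
+# (num_iterations=10000, cooldown_frac=0.8) the shape is:
+#
+#     def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
+#         x = min(max(step / num_iterations, 0.0), 1.0)   # clamped progress
+#         if x < 1 - cooldown_frac:
+#             return 1.0                                   # stable phase
+#         w = (1 - x) / cooldown_frac
+#         return w * 1.0 + (1 - w) * 0.1                   # linear decay to 0.1
+#
+#     lr_multiplier(0)     -> 1.0
+#     lr_multiplier(2000)  -> 1.0   (boundary of the stable phase)
+#     lr_multiplier(6000)  -> 0.55
+#     lr_multiplier(10000) -> 0.1
+#
+# get_window_size_blocks grows the attention window in multiples of 128 tokens,
+# from 128 at step 0 up to 1792 (the smallest multiple of 128 >= 1728) at the
+# end of training.
+# ----------------------------------------------------------------------------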
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:26:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:26:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:26:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:26:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:26:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:26:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:26:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:26:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:26:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:26:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:26:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:26:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:26:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:26:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:26:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:26:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:26:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:26:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:26:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:26:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:26:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:26:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:26:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:26:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:26:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:26:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:26:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:26:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:26:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:26:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:26:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:26:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:26:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:26:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:26:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:26:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:27:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:27:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:27:12] [Rank 0] PRINT: Starting training... +[2025-07-05 08:27:12] [Rank 0] PRINT: Starting training... +[2025-07-05 08:27:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:27:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
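+# [Editor's note: illustrative arithmetic, not part of the original log.] The
+# warning above follows from the run's hyperparameters: val_batch_size =
+# world_size * val_seq_len = 4 * 65536 = 262144 (implying world_size = 4), and
+#     1966080 / 262144   = 7.5        # not an integer, hence the warning
+#     1966080 // 262144  = 7          # validation steps actually executed
+#     1966080 - 7 * 262144 = 131072   # tokens skipped per validation pass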
+[2025-07-05 08:27:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:27:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:27:21] [Rank 0] step:21/10000 train_time:1757ms step_avg:83.68ms +[2025-07-05 08:27:21] [Rank 0] step:21/10000 train_time:1757ms step_avg:83.68ms +[2025-07-05 08:27:23] [Rank 0] step:41/10000 train_time:3208ms step_avg:78.25ms +[2025-07-05 08:27:23] [Rank 0] step:41/10000 train_time:3208ms step_avg:78.25ms +[2025-07-05 08:27:24] [Rank 0] step:61/10000 train_time:4661ms step_avg:76.42ms +[2025-07-05 08:27:24] [Rank 0] step:61/10000 train_time:4661ms step_avg:76.42ms +[2025-07-05 08:27:26] [Rank 0] step:81/10000 train_time:6115ms step_avg:75.50ms +[2025-07-05 08:27:26] [Rank 0] step:81/10000 train_time:6115ms step_avg:75.50ms +[2025-07-05 08:27:27] [Rank 0] step:101/10000 train_time:7808ms step_avg:77.31ms +[2025-07-05 08:27:27] [Rank 0] step:101/10000 train_time:7808ms step_avg:77.31ms +[2025-07-05 08:27:29] [Rank 0] step:121/10000 train_time:9495ms step_avg:78.47ms +[2025-07-05 08:27:29] [Rank 0] step:121/10000 train_time:9495ms step_avg:78.47ms +[2025-07-05 08:27:30] [Rank 0] step:141/10000 train_time:10966ms step_avg:77.78ms +[2025-07-05 08:27:30] [Rank 0] step:141/10000 train_time:10966ms step_avg:77.78ms +[2025-07-05 08:27:32] [Rank 0] step:161/10000 train_time:12421ms step_avg:77.15ms +[2025-07-05 08:27:32] [Rank 0] step:161/10000 train_time:12421ms step_avg:77.15ms +[2025-07-05 08:27:34] [Rank 0] step:181/10000 train_time:13876ms step_avg:76.66ms +[2025-07-05 08:27:34] [Rank 0] step:181/10000 train_time:13876ms step_avg:76.66ms +[2025-07-05 08:27:35] [Rank 0] step:201/10000 train_time:15987ms step_avg:79.54ms +[2025-07-05 08:27:35] [Rank 0] step:201/10000 train_time:15987ms step_avg:79.54ms +[2025-07-05 08:27:37] [Rank 0] step:221/10000 train_time:17442ms step_avg:78.92ms +[2025-07-05 08:27:37] [Rank 0] step:221/10000 train_time:17442ms step_avg:78.92ms +[2025-07-05 08:27:38] [Rank 0] step:241/10000 train_time:18899ms step_avg:78.42ms +[2025-07-05 08:27:38] [Rank 0] step:241/10000 train_time:18899ms step_avg:78.42ms +[2025-07-05 08:27:40] [Rank 0] step:261/10000 train_time:20354ms step_avg:77.98ms +[2025-07-05 08:27:40] [Rank 0] step:261/10000 train_time:20354ms step_avg:77.98ms +[2025-07-05 08:27:42] [Rank 0] step:281/10000 train_time:22150ms step_avg:78.83ms +[2025-07-05 08:27:42] [Rank 0] step:281/10000 train_time:22150ms step_avg:78.83ms +[2025-07-05 08:27:43] [Rank 0] step:301/10000 train_time:23604ms step_avg:78.42ms +[2025-07-05 08:27:43] [Rank 0] step:301/10000 train_time:23604ms step_avg:78.42ms +[2025-07-05 08:27:44] [Rank 0] step:321/10000 train_time:25060ms step_avg:78.07ms +[2025-07-05 08:27:44] [Rank 0] step:321/10000 train_time:25060ms step_avg:78.07ms +[2025-07-05 08:27:46] [Rank 0] step:341/10000 train_time:26517ms step_avg:77.76ms +[2025-07-05 08:27:46] [Rank 0] step:341/10000 train_time:26517ms step_avg:77.76ms +[2025-07-05 08:27:48] [Rank 0] step:361/10000 train_time:28232ms step_avg:78.20ms +[2025-07-05 08:27:48] [Rank 0] step:361/10000 train_time:28232ms step_avg:78.20ms +[2025-07-05 08:27:50] [Rank 0] step:381/10000 train_time:30101ms step_avg:79.01ms +[2025-07-05 08:27:50] [Rank 0] step:381/10000 train_time:30101ms step_avg:79.01ms +[2025-07-05 08:27:51] [Rank 0] step:401/10000 train_time:31558ms step_avg:78.70ms +[2025-07-05 08:27:51] [Rank 0] step:401/10000 train_time:31558ms step_avg:78.70ms +[2025-07-05 08:27:52] [Rank 0] step:421/10000 train_time:33014ms step_avg:78.42ms 
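+# [Editor's note: rough estimate, not part of the original log.] At the logged
+# ~78 ms/step, with world_size * train_seq_len = 4 * 12288 = 49152 tokens
+# consumed per optimizer step (world_size = 4 inferred from the val_batch_size
+# warning above), throughput is approximately
+#     49152 / 0.078 ≈ 6.3e5 tokens/sec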
+[2025-07-05 08:27:52] [Rank 0] step:421/10000 train_time:33014ms step_avg:78.42ms +[2025-07-05 08:27:54] [Rank 0] step:441/10000 train_time:34471ms step_avg:78.17ms +[2025-07-05 08:27:54] [Rank 0] step:441/10000 train_time:34471ms step_avg:78.17ms +[2025-07-05 08:27:56] [Rank 0] step:461/10000 train_time:36165ms step_avg:78.45ms +[2025-07-05 08:27:56] [Rank 0] step:461/10000 train_time:36165ms step_avg:78.45ms +[2025-07-05 08:27:57] [Rank 0] step:481/10000 train_time:37623ms step_avg:78.22ms +[2025-07-05 08:27:57] [Rank 0] step:481/10000 train_time:37623ms step_avg:78.22ms +[2025-07-05 08:27:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:27:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:27:59] [Rank 0] PRINT: step:500/10000 train_loss:9.6494 val_loss:8.5920 train_time:39081ms step_avg:78.16ms +[2025-07-05 08:27:59] [Rank 0] PRINT: step:500/10000 train_loss:9.6494 val_loss:8.5920 train_time:39081ms step_avg:78.16ms +[2025-07-05 08:27:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:27:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2f44b7da84538d4630967514d94fdd110941b811 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "75983bd3-9ae1-4e72-be7c-38a1014c18fb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/training_log_75983bd3-9ae1-4e72-be7c-38a1014c18fb.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/training_log_75983bd3-9ae1-4e72-be7c-38a1014c18fb.txt new file mode 100644 index 0000000000000000000000000000000000000000..f753c7c348efb78975cb930220986e850b554534 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/training_log_75983bd3-9ae1-4e72-be7c-38a1014c18fb.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:53:25] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:53:25 2025 --- +[2025-07-05 08:53:25] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:53:25 2025 --- +[2025-07-05 08:53:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:53:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:53:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:53:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:53:25] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 
08:53:25] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:53:25] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48 +[2025-07-05 08:53:25] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48 +[2025-07-05 08:53:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
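+    Classes are mapped to power-law frequency groups via
+    generate_powerlaw_selection_counts(m_val); the return value maps str(group_id)
+    to that group's mean loss.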
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
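+# train_loss_sum (above) and train_step_count (below) are per-rank running
+# statistics, reset after every validation pass; each training step adds
+# loss_train.detach() / args.train_seq_len, so the validation block can report
+# their mean and all-reduce it across ranks. The division by train_seq_len is a
+# per-token normalisation, assuming the model returns a sequence-summed loss.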
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:53:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
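+# Every rank starts from this absolute default; below, the master process
+# rebuilds run_dir_path from base_log_dir plus a folder name that encodes
+# optimizer_mode, model_parameterization, adam_lr and seed, creates the
+# directory, and writes config.json and the training log into it.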
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
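+        # Everything after the '?' is treated as the answer; the first token of
+        # ' ' + answer (leading space, matching GPT-2 BPE behaviour in context)
+        # becomes the expected first token for the accuracy check.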
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
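+# Illustration (not from the original script) of the stable-then-decay behaviour of get_lr above,
+# assuming this run's config values num_iterations=10000 and cooldown_frac=0.8: the multiplier is
+# held at 1.0 for the first 20% of steps, then decays linearly toward 0.1.
+#   for s in (0, 2000, 6000, 10000):
+#       x = s / 10000
+#       w = (1 - x) / 0.8
+#       print(s, 1.0 if x < 0.2 else w * 1.0 + (1 - w) * 0.1)   # -> 1.0, 1.0, 0.55, 0.1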
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:53:25] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:53:25] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:53:25] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:53:25] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:53:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:53:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:53:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:53:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:53:27] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:53:27] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:53:28] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:53:28] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:53:28] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:53:28] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:53:28] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:53:28] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:53:28] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:53:28] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:53:28] [Rank 0] PRINT: Model returns: +[2025-07-05 08:53:28] [Rank 0] PRINT: Model returns: +[2025-07-05 08:53:28] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:53:28] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:53:28] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:53:28] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:53:28] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:53:28] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:53:28] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:53:28] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:53:28] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:53:28] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:53:28] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:53:28] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:53:28] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:53:28] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:53:28] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:53:28] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:54:34] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:54:34] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:54:34] [Rank 0] PRINT: Starting training... +[2025-07-05 08:54:34] [Rank 0] PRINT: Starting training... +[2025-07-05 08:54:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:54:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
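(Context for the warning above, using the config values from this run: val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144, and 1966080 / 262144 = 7.5, so val_num_steps = 1966080 // 262144 = 7 full validation batches are evaluated, i.e. 7 * 262144 = 1835008 tokens; the remaining 131072 tokens are the ones "missed".)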
+[2025-07-05 08:54:41] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:54:41] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:54:43] [Rank 0] step:21/10000 train_time:1751ms step_avg:83.38ms +[2025-07-05 08:54:43] [Rank 0] step:21/10000 train_time:1751ms step_avg:83.38ms +[2025-07-05 08:54:45] [Rank 0] step:41/10000 train_time:3200ms step_avg:78.06ms +[2025-07-05 08:54:45] [Rank 0] step:41/10000 train_time:3200ms step_avg:78.06ms +[2025-07-05 08:54:46] [Rank 0] step:61/10000 train_time:4652ms step_avg:76.26ms +[2025-07-05 08:54:46] [Rank 0] step:61/10000 train_time:4652ms step_avg:76.26ms +[2025-07-05 08:54:48] [Rank 0] step:81/10000 train_time:6103ms step_avg:75.35ms +[2025-07-05 08:54:48] [Rank 0] step:81/10000 train_time:6103ms step_avg:75.35ms +[2025-07-05 08:54:50] [Rank 0] step:101/10000 train_time:8215ms step_avg:81.34ms +[2025-07-05 08:54:50] [Rank 0] step:101/10000 train_time:8215ms step_avg:81.34ms +[2025-07-05 08:54:51] [Rank 0] step:121/10000 train_time:9667ms step_avg:79.89ms +[2025-07-05 08:54:51] [Rank 0] step:121/10000 train_time:9667ms step_avg:79.89ms +[2025-07-05 08:54:53] [Rank 0] step:141/10000 train_time:11119ms step_avg:78.86ms +[2025-07-05 08:54:53] [Rank 0] step:141/10000 train_time:11119ms step_avg:78.86ms +[2025-07-05 08:54:54] [Rank 0] step:161/10000 train_time:12820ms step_avg:79.63ms +[2025-07-05 08:54:54] [Rank 0] step:161/10000 train_time:12820ms step_avg:79.63ms +[2025-07-05 08:54:56] [Rank 0] step:181/10000 train_time:14381ms step_avg:79.45ms +[2025-07-05 08:54:56] [Rank 0] step:181/10000 train_time:14381ms step_avg:79.45ms +[2025-07-05 08:54:58] [Rank 0] step:201/10000 train_time:16029ms step_avg:79.75ms +[2025-07-05 08:54:58] [Rank 0] step:201/10000 train_time:16029ms step_avg:79.75ms +[2025-07-05 08:54:59] [Rank 0] step:221/10000 train_time:17481ms step_avg:79.10ms +[2025-07-05 08:54:59] [Rank 0] step:221/10000 train_time:17481ms step_avg:79.10ms +[2025-07-05 08:55:00] [Rank 0] step:241/10000 train_time:18935ms step_avg:78.57ms +[2025-07-05 08:55:00] [Rank 0] step:241/10000 train_time:18935ms step_avg:78.57ms +[2025-07-05 08:55:02] [Rank 0] step:261/10000 train_time:20391ms step_avg:78.13ms +[2025-07-05 08:55:02] [Rank 0] step:261/10000 train_time:20391ms step_avg:78.13ms +[2025-07-05 08:55:04] [Rank 0] step:281/10000 train_time:22522ms step_avg:80.15ms +[2025-07-05 08:55:04] [Rank 0] step:281/10000 train_time:22522ms step_avg:80.15ms +[2025-07-05 08:55:06] [Rank 0] step:301/10000 train_time:23975ms step_avg:79.65ms +[2025-07-05 08:55:06] [Rank 0] step:301/10000 train_time:23975ms step_avg:79.65ms +[2025-07-05 08:55:07] [Rank 0] step:321/10000 train_time:25432ms step_avg:79.23ms +[2025-07-05 08:55:07] [Rank 0] step:321/10000 train_time:25432ms step_avg:79.23ms +[2025-07-05 08:55:08] [Rank 0] step:341/10000 train_time:26887ms step_avg:78.85ms +[2025-07-05 08:55:08] [Rank 0] step:341/10000 train_time:26887ms step_avg:78.85ms +[2025-07-05 08:55:11] [Rank 0] step:361/10000 train_time:28598ms step_avg:79.22ms +[2025-07-05 08:55:11] [Rank 0] step:361/10000 train_time:28598ms step_avg:79.22ms +[2025-07-05 08:55:12] [Rank 0] step:381/10000 train_time:30450ms step_avg:79.92ms +[2025-07-05 08:55:12] [Rank 0] step:381/10000 train_time:30450ms step_avg:79.92ms +[2025-07-05 08:55:13] [Rank 0] step:401/10000 train_time:31907ms step_avg:79.57ms +[2025-07-05 08:55:13] [Rank 0] step:401/10000 train_time:31907ms step_avg:79.57ms +[2025-07-05 08:55:15] [Rank 0] step:421/10000 train_time:33365ms step_avg:79.25ms 
+[2025-07-05 08:55:15] [Rank 0] step:421/10000 train_time:33365ms step_avg:79.25ms +[2025-07-05 08:55:16] [Rank 0] step:441/10000 train_time:34819ms step_avg:78.95ms +[2025-07-05 08:55:16] [Rank 0] step:441/10000 train_time:34819ms step_avg:78.95ms +[2025-07-05 08:55:18] [Rank 0] step:461/10000 train_time:36919ms step_avg:80.08ms +[2025-07-05 08:55:18] [Rank 0] step:461/10000 train_time:36919ms step_avg:80.08ms +[2025-07-05 08:55:20] [Rank 0] step:481/10000 train_time:38372ms step_avg:79.78ms +[2025-07-05 08:55:20] [Rank 0] step:481/10000 train_time:38372ms step_avg:79.78ms +[2025-07-05 08:55:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:55:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:55:22] [Rank 0] PRINT: step:500/10000 train_loss:9.6494 val_loss:8.5918 train_time:39828ms step_avg:79.66ms +[2025-07-05 08:55:22] [Rank 0] PRINT: step:500/10000 train_loss:9.6494 val_loss:8.5918 train_time:39828ms step_avg:79.66ms +[2025-07-05 08:55:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:55:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c07f3822d8c44749f14177e94224e6ff34edcabc --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "7874f529-65f2-4224-8ce7-a12a0f947335", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49/training_log_7874f529-65f2-4224-8ce7-a12a0f947335.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49/training_log_7874f529-65f2-4224-8ce7-a12a0f947335.txt new file mode 100644 index 0000000000000000000000000000000000000000..63091bd8cd3e344587ecdb14eb24b63cebfed454 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49/training_log_7874f529-65f2-4224-8ce7-a12a0f947335.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:21:00] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:21:00 2025 --- +[2025-07-05 09:21:00] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:21:00 2025 --- +[2025-07-05 09:21:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:21:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:21:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:21:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:21:00] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 
09:21:00] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:21:00] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49 +[2025-07-05 09:21:00] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_49 +[2025-07-05 09:21:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the run's log file once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
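+    Here "per-class" means per power-law group from generate_powerlaw_selection_counts:
+    group 0 is the single most-frequently sampled class and higher group ids cover
+    exponentially rarer classes, so the returned dict is keyed by group id (as a string).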
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
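+# Running training-loss accumulators: each step adds loss_train.detach() / args.train_seq_len,
+# the mean over train_step_count is all-reduced across ranks at validation time, and both
+# tensors are reset to zero afterwards so each report covers only the most recent window.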
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:21:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
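+# run_dir_path above is a provisional absolute path; on the master process it is rebuilt
+# below as base_log_dir / run_folder_name (same mode/param/lr/seed naming) and the
+# directory is created there before the log file and config.json are written.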
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
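+        # Illustrative (hypothetical) example, assuming the usual GPT-2 BPE tokenizer:
+        #   cleaned_text = "What is the birth date of Alice Smith? October 2, 1996"
+        #   -> prompt = "What is the birth date of Alice Smith?"
+        #   -> answer = "October 2, 1996"; expected_token is the first token of " October 2, 1996"
+        #      (the leading space keeps tokenization consistent with the training context).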
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
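# --- Editor's note (illustrative only, not part of the original script): a worked example of the
# get_lr schedule defined earlier in this section, using the logged settings num_iterations=10000
# and cooldown_frac=0.8. The multiplier stays at 1.0 while x = step/num_iterations < 1 - cooldown_frac,
# then decays linearly toward 0.1 over the cooldown:
#   step  1000 -> x = 0.1 -> multiplier 1.0                      (stable phase, x < 0.2)
#   step  6000 -> x = 0.6 -> w = (1 - 0.6)/0.8 = 0.5 -> multiplier 0.5*1.0 + 0.5*0.1 = 0.55
#   step 10000 -> x = 1.0 -> w = 0.0                 -> multiplier 0.1
# Each optimizer group then uses lr = initial_lr * multiplier, as applied in the training loop below.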
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:21:00] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:21:00] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:21:00] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:21:00] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:21:02] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:21:02] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:21:02] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:21:02] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:21:02] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:21:02] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:21:03] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:21:03] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:21:03] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:21:03] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:21:03] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:21:03] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:21:03] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:21:03] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:21:03] [Rank 0] PRINT: Model returns: +[2025-07-05 09:21:03] [Rank 0] PRINT: Model returns: +[2025-07-05 09:21:03] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:21:03] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:21:03] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:21:03] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:21:03] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:21:03] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:21:03] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:21:03] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:21:03] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:21:03] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:21:03] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:21:03] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:21:03] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:21:03] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:21:03] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:21:03] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:22:08] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:22:08] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:22:08] [Rank 0] PRINT: Starting training... +[2025-07-05 09:22:08] [Rank 0] PRINT: Starting training... +[2025-07-05 09:22:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:22:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
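Editor's note (not part of the logged output): the divisibility warning above is expected for this configuration. Each validation step consumes val_batch_size = world_size × val_seq_len tokens; the logged value 262144 with val_seq_len = 65536 implies 4 ranks. Since 1966080 / 262144 = 7.5, the integer division in the script runs 7 full validation steps and the final 131072 tokens (half a batch) are skipped.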
+[2025-07-05 09:22:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:22:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:22:17] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.63ms +[2025-07-05 09:22:17] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.63ms +[2025-07-05 09:22:19] [Rank 0] step:41/10000 train_time:3251ms step_avg:79.30ms +[2025-07-05 09:22:19] [Rank 0] step:41/10000 train_time:3251ms step_avg:79.30ms +[2025-07-05 09:22:20] [Rank 0] step:61/10000 train_time:4701ms step_avg:77.07ms +[2025-07-05 09:22:20] [Rank 0] step:61/10000 train_time:4701ms step_avg:77.07ms +[2025-07-05 09:22:22] [Rank 0] step:81/10000 train_time:6153ms step_avg:75.96ms +[2025-07-05 09:22:22] [Rank 0] step:81/10000 train_time:6153ms step_avg:75.96ms +[2025-07-05 09:22:23] [Rank 0] step:101/10000 train_time:7847ms step_avg:77.69ms +[2025-07-05 09:22:23] [Rank 0] step:101/10000 train_time:7847ms step_avg:77.69ms +[2025-07-05 09:22:25] [Rank 0] step:121/10000 train_time:9300ms step_avg:76.86ms +[2025-07-05 09:22:25] [Rank 0] step:121/10000 train_time:9300ms step_avg:76.86ms +[2025-07-05 09:22:26] [Rank 0] step:141/10000 train_time:10751ms step_avg:76.25ms +[2025-07-05 09:22:26] [Rank 0] step:141/10000 train_time:10751ms step_avg:76.25ms +[2025-07-05 09:22:28] [Rank 0] step:161/10000 train_time:12204ms step_avg:75.80ms +[2025-07-05 09:22:28] [Rank 0] step:161/10000 train_time:12204ms step_avg:75.80ms +[2025-07-05 09:22:29] [Rank 0] step:181/10000 train_time:13917ms step_avg:76.89ms +[2025-07-05 09:22:29] [Rank 0] step:181/10000 train_time:13917ms step_avg:76.89ms +[2025-07-05 09:22:31] [Rank 0] step:201/10000 train_time:15453ms step_avg:76.88ms +[2025-07-05 09:22:31] [Rank 0] step:201/10000 train_time:15453ms step_avg:76.88ms +[2025-07-05 09:22:32] [Rank 0] step:221/10000 train_time:16907ms step_avg:76.50ms +[2025-07-05 09:22:32] [Rank 0] step:221/10000 train_time:16907ms step_avg:76.50ms +[2025-07-05 09:22:34] [Rank 0] step:241/10000 train_time:18363ms step_avg:76.20ms +[2025-07-05 09:22:34] [Rank 0] step:241/10000 train_time:18363ms step_avg:76.20ms +[2025-07-05 09:22:35] [Rank 0] step:261/10000 train_time:19820ms step_avg:75.94ms +[2025-07-05 09:22:35] [Rank 0] step:261/10000 train_time:19820ms step_avg:75.94ms +[2025-07-05 09:22:37] [Rank 0] step:281/10000 train_time:21918ms step_avg:78.00ms +[2025-07-05 09:22:37] [Rank 0] step:281/10000 train_time:21918ms step_avg:78.00ms +[2025-07-05 09:22:39] [Rank 0] step:301/10000 train_time:23376ms step_avg:77.66ms +[2025-07-05 09:22:39] [Rank 0] step:301/10000 train_time:23376ms step_avg:77.66ms +[2025-07-05 09:22:40] [Rank 0] step:321/10000 train_time:24830ms step_avg:77.35ms +[2025-07-05 09:22:40] [Rank 0] step:321/10000 train_time:24830ms step_avg:77.35ms +[2025-07-05 09:22:42] [Rank 0] step:341/10000 train_time:26286ms step_avg:77.09ms +[2025-07-05 09:22:42] [Rank 0] step:341/10000 train_time:26286ms step_avg:77.09ms +[2025-07-05 09:22:44] [Rank 0] step:361/10000 train_time:27798ms step_avg:77.00ms +[2025-07-05 09:22:44] [Rank 0] step:361/10000 train_time:27798ms step_avg:77.00ms +[2025-07-05 09:22:45] [Rank 0] step:381/10000 train_time:29851ms step_avg:78.35ms +[2025-07-05 09:22:45] [Rank 0] step:381/10000 train_time:29851ms step_avg:78.35ms +[2025-07-05 09:22:47] [Rank 0] step:401/10000 train_time:31305ms step_avg:78.07ms +[2025-07-05 09:22:47] [Rank 0] step:401/10000 train_time:31305ms step_avg:78.07ms +[2025-07-05 09:22:48] [Rank 0] step:421/10000 train_time:32761ms step_avg:77.82ms 
+[2025-07-05 09:22:48] [Rank 0] step:421/10000 train_time:32761ms step_avg:77.82ms +[2025-07-05 09:22:50] [Rank 0] step:441/10000 train_time:34218ms step_avg:77.59ms +[2025-07-05 09:22:50] [Rank 0] step:441/10000 train_time:34218ms step_avg:77.59ms +[2025-07-05 09:22:51] [Rank 0] step:461/10000 train_time:35909ms step_avg:77.89ms +[2025-07-05 09:22:51] [Rank 0] step:461/10000 train_time:35909ms step_avg:77.89ms +[2025-07-05 09:22:53] [Rank 0] step:481/10000 train_time:37364ms step_avg:77.68ms +[2025-07-05 09:22:53] [Rank 0] step:481/10000 train_time:37364ms step_avg:77.68ms +[2025-07-05 09:22:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:22:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:22:55] [Rank 0] PRINT: step:500/10000 train_loss:9.6495 val_loss:8.5922 train_time:38818ms step_avg:77.64ms +[2025-07-05 09:22:55] [Rank 0] PRINT: step:500/10000 train_loss:9.6495 val_loss:8.5922 train_time:38818ms step_avg:77.64ms +[2025-07-05 09:22:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:22:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0e2c01f211cb51135bb6b11b0979d9ed04636c78 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e1a67ade-97f7-43f6-8a72-9a090c241bf5", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50/training_log_e1a67ade-97f7-43f6-8a72-9a090c241bf5.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50/training_log_e1a67ade-97f7-43f6-8a72-9a090c241bf5.txt new file mode 100644 index 0000000000000000000000000000000000000000..250937d7dcf5006cd7038555b6a72c4a4437cf04 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50/training_log_e1a67ade-97f7-43f6-8a72-9a090c241bf5.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:48:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:48:21 2025 --- +[2025-07-05 09:48:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:48:21 2025 --- +[2025-07-05 09:48:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:48:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:48:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:48:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:48:21] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 
09:48:21] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:48:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50 +[2025-07-05 09:48:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_50 +[2025-07-05 09:48:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
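+# --- Editor's note: illustrative sketch, not part of the logged run ---
+# The get_lr() schedule defined above is a stable-then-decay multiplier applied
+# to each param group's "initial_lr": it stays at 1.0 for the first
+# (1 - cooldown_frac) of training, then decays linearly down to 0.1. Assuming the
+# logged hyperparameters (num_iterations=10000, cooldown_frac=0.8):
+#   step  1000 -> x = 0.1 -> stable phase              -> multiplier 1.0
+#   step  5000 -> x = 0.5 -> w = (1 - 0.5)/0.8 = 0.625 -> 0.625*1.0 + 0.375*0.1 = 0.6625
+#   step 10000 -> x = 1.0 -> w = 0.0                   -> multiplier 0.1
+# A hypothetical sanity check: assert abs(get_lr(5000) - 0.6625) < 1e-9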
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:48:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
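+# --- Editor's note: illustrative sketch, not part of the logged run ---
+# The DDP setup above reads RANK / LOCAL_RANK / WORLD_SIZE from the environment,
+# so the script is expected to be started by torchrun (or another launcher that
+# exports those variables), one process per GPU, each binding cuda:LOCAL_RANK.
+# A hypothetical single-node launch matching the logged config.json (the script
+# filename and GPU count are assumptions, not taken from the log) might look like:
+#   torchrun --standalone --nproc_per_node=8 train_gpt_bios.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42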
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:48:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:48:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:48:22] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:48:22] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:48:24] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:48:24] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:48:24] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:48:24] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:48:24] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:48:24] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:48:25] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:48:25] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:48:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:48:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:48:25] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:48:25] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:48:25] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:48:25] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:48:25] [Rank 0] PRINT: Model returns: +[2025-07-05 09:48:25] [Rank 0] PRINT: Model returns: +[2025-07-05 09:48:25] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:48:25] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:48:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:48:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:48:25] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:48:25] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:48:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:48:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:48:25] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:48:25] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:48:25] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:48:25] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:48:25] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:48:25] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:48:25] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:48:25] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:49:29] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:49:29] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:49:29] [Rank 0] PRINT: Starting training... +[2025-07-05 09:49:29] [Rank 0] PRINT: Starting training... +[2025-07-05 09:49:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:49:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:49:37] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:49:37] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:49:38] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-05 09:49:38] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-05 09:49:40] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.17ms +[2025-07-05 09:49:40] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.17ms +[2025-07-05 09:49:41] [Rank 0] step:61/10000 train_time:4450ms step_avg:72.95ms +[2025-07-05 09:49:41] [Rank 0] step:61/10000 train_time:4450ms step_avg:72.95ms +[2025-07-05 09:49:43] [Rank 0] step:81/10000 train_time:6156ms step_avg:76.00ms +[2025-07-05 09:49:43] [Rank 0] step:81/10000 train_time:6156ms step_avg:76.00ms +[2025-07-05 09:49:45] [Rank 0] step:101/10000 train_time:8283ms step_avg:82.01ms +[2025-07-05 09:49:45] [Rank 0] step:101/10000 train_time:8283ms step_avg:82.01ms +[2025-07-05 09:49:47] [Rank 0] step:121/10000 train_time:9733ms step_avg:80.44ms +[2025-07-05 09:49:47] [Rank 0] step:121/10000 train_time:9733ms step_avg:80.44ms +[2025-07-05 09:49:48] [Rank 0] step:141/10000 train_time:11184ms step_avg:79.32ms +[2025-07-05 09:49:48] [Rank 0] step:141/10000 train_time:11184ms step_avg:79.32ms +[2025-07-05 09:49:49] [Rank 0] step:161/10000 train_time:12639ms step_avg:78.50ms +[2025-07-05 09:49:49] [Rank 0] step:161/10000 train_time:12639ms step_avg:78.50ms +[2025-07-05 09:49:52] [Rank 0] step:181/10000 train_time:14781ms step_avg:81.66ms +[2025-07-05 09:49:52] [Rank 0] step:181/10000 train_time:14781ms step_avg:81.66ms +[2025-07-05 09:49:53] [Rank 0] step:201/10000 train_time:16212ms step_avg:80.66ms +[2025-07-05 09:49:53] [Rank 0] step:201/10000 train_time:16212ms step_avg:80.66ms +[2025-07-05 09:49:54] [Rank 0] step:221/10000 train_time:17668ms step_avg:79.95ms +[2025-07-05 09:49:54] [Rank 0] step:221/10000 train_time:17668ms step_avg:79.95ms +[2025-07-05 09:49:56] [Rank 0] step:241/10000 train_time:19120ms step_avg:79.34ms +[2025-07-05 09:49:56] [Rank 0] step:241/10000 train_time:19120ms step_avg:79.34ms +[2025-07-05 09:49:57] [Rank 0] step:261/10000 train_time:20575ms step_avg:78.83ms +[2025-07-05 09:49:57] [Rank 0] step:261/10000 train_time:20575ms step_avg:78.83ms +[2025-07-05 09:50:00] [Rank 0] step:281/10000 train_time:22678ms step_avg:80.70ms +[2025-07-05 09:50:00] [Rank 0] step:281/10000 train_time:22678ms step_avg:80.70ms +[2025-07-05 09:50:01] [Rank 0] step:301/10000 train_time:24139ms step_avg:80.20ms +[2025-07-05 09:50:01] [Rank 0] step:301/10000 train_time:24139ms step_avg:80.20ms +[2025-07-05 09:50:02] [Rank 0] step:321/10000 train_time:25593ms step_avg:79.73ms +[2025-07-05 09:50:02] [Rank 0] step:321/10000 train_time:25593ms step_avg:79.73ms +[2025-07-05 09:50:04] [Rank 0] step:341/10000 train_time:27048ms step_avg:79.32ms +[2025-07-05 09:50:04] [Rank 0] step:341/10000 train_time:27048ms step_avg:79.32ms +[2025-07-05 09:50:06] [Rank 0] step:361/10000 train_time:28769ms step_avg:79.69ms +[2025-07-05 09:50:06] [Rank 0] step:361/10000 train_time:28769ms step_avg:79.69ms +[2025-07-05 09:50:07] [Rank 0] step:381/10000 train_time:30204ms step_avg:79.27ms +[2025-07-05 09:50:07] [Rank 0] step:381/10000 train_time:30204ms step_avg:79.27ms +[2025-07-05 09:50:08] [Rank 0] step:401/10000 train_time:31657ms step_avg:78.94ms +[2025-07-05 09:50:08] [Rank 0] step:401/10000 train_time:31657ms step_avg:78.94ms +[2025-07-05 09:50:10] [Rank 0] step:421/10000 train_time:33113ms step_avg:78.65ms 
+[2025-07-05 09:50:10] [Rank 0] step:421/10000 train_time:33113ms step_avg:78.65ms +[2025-07-05 09:50:11] [Rank 0] step:441/10000 train_time:34568ms step_avg:78.39ms +[2025-07-05 09:50:11] [Rank 0] step:441/10000 train_time:34568ms step_avg:78.39ms +[2025-07-05 09:50:14] [Rank 0] step:461/10000 train_time:36698ms step_avg:79.61ms +[2025-07-05 09:50:14] [Rank 0] step:461/10000 train_time:36698ms step_avg:79.61ms +[2025-07-05 09:50:15] [Rank 0] step:481/10000 train_time:38155ms step_avg:79.32ms +[2025-07-05 09:50:15] [Rank 0] step:481/10000 train_time:38155ms step_avg:79.32ms +[2025-07-05 09:50:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:50:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:50:17] [Rank 0] PRINT: step:500/10000 train_loss:9.6494 val_loss:8.5920 train_time:39610ms step_avg:79.22ms +[2025-07-05 09:50:17] [Rank 0] PRINT: step:500/10000 train_loss:9.6494 val_loss:8.5920 train_time:39610ms step_avg:79.22ms +[2025-07-05 09:50:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:50:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..57baa5e2005f3f71b1167c0e5f9c0956ca3332d0 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "36d6efac-92ab-4664-ae7a-3b49d4bd29ea", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51/training_log_36d6efac-92ab-4664-ae7a-3b49d4bd29ea.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51/training_log_36d6efac-92ab-4664-ae7a-3b49d4bd29ea.txt new file mode 100644 index 0000000000000000000000000000000000000000..e06e9d7e32c4924f722b7484ac9668dd386f53b3 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51/training_log_36d6efac-92ab-4664-ae7a-3b49d4bd29ea.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:15:46] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:15:46 2025 --- +[2025-07-05 10:15:46] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:15:46 2025 --- +[2025-07-05 10:15:46] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:15:46] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:15:46] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:15:46] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:15:46] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 
10:15:46] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:15:46] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51 +[2025-07-05 10:15:46] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_51 +[2025-07-05 10:15:46] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
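+# train_loss_sum / train_step_count accumulate the per-step training loss (scaled by
+# 1/train_seq_len) between validation points; the running average is all-reduced across
+# ranks and reported alongside val_loss in the periodic log line.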
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:15:46] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
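+# Illustrative run-directory layout produced by the code below (example values only,
+# assuming --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0001 --seed 42):
+#   logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/
+#       config.json                   # CLI args + hyperparameters, dumped at startup
+#       training_log_<run_uuid>.txt   # everything routed through print0()
+#       per_class_loss_curves.png     # plots refreshed at every evaluation step
+#       per_class_acc_curves.png
+#       total_loss_curve.png
+#       total_acc_curve.png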
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
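+# Running totals for the current logging interval: train_loss_sum accumulates the
+# detached per-step training loss (normalized by the training sequence length) and
+# train_step_count the number of training steps since the last validation checkpoint;
+# both live on the GPU so their ratio can be all-reduced across ranks and reported
+# as the average training loss when validation runs.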
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:15:47] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:15:47] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:15:47] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:15:47] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:15:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:15:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:15:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:15:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:15:49] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:15:49] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:15:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:15:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:15:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:15:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:15:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:15:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:15:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:15:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:15:50] [Rank 0] PRINT: Model returns: +[2025-07-05 10:15:50] [Rank 0] PRINT: Model returns: +[2025-07-05 10:15:50] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:15:50] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:15:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:15:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:15:50] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:15:50] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:15:50] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:15:50] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:15:50] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:15:50] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:15:50] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:15:50] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:15:50] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:15:50] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:15:50] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:15:50] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:16:55] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:16:55] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:16:55] [Rank 0] PRINT: Starting training... +[2025-07-05 10:16:55] [Rank 0] PRINT: Starting training... +[2025-07-05 10:16:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:16:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:17:02] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:17:02] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:17:04] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.26ms +[2025-07-05 10:17:04] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.26ms +[2025-07-05 10:17:06] [Rank 0] step:41/10000 train_time:3199ms step_avg:78.01ms +[2025-07-05 10:17:06] [Rank 0] step:41/10000 train_time:3199ms step_avg:78.01ms +[2025-07-05 10:17:07] [Rank 0] step:61/10000 train_time:4761ms step_avg:78.04ms +[2025-07-05 10:17:07] [Rank 0] step:61/10000 train_time:4761ms step_avg:78.04ms +[2025-07-05 10:17:09] [Rank 0] step:81/10000 train_time:6391ms step_avg:78.91ms +[2025-07-05 10:17:09] [Rank 0] step:81/10000 train_time:6391ms step_avg:78.91ms +[2025-07-05 10:17:11] [Rank 0] step:101/10000 train_time:8497ms step_avg:84.13ms +[2025-07-05 10:17:11] [Rank 0] step:101/10000 train_time:8497ms step_avg:84.13ms +[2025-07-05 10:17:12] [Rank 0] step:121/10000 train_time:9947ms step_avg:82.21ms +[2025-07-05 10:17:12] [Rank 0] step:121/10000 train_time:9947ms step_avg:82.21ms +[2025-07-05 10:17:14] [Rank 0] step:141/10000 train_time:11398ms step_avg:80.84ms +[2025-07-05 10:17:14] [Rank 0] step:141/10000 train_time:11398ms step_avg:80.84ms +[2025-07-05 10:17:15] [Rank 0] step:161/10000 train_time:12851ms step_avg:79.82ms +[2025-07-05 10:17:15] [Rank 0] step:161/10000 train_time:12851ms step_avg:79.82ms +[2025-07-05 10:17:17] [Rank 0] step:181/10000 train_time:14561ms step_avg:80.45ms +[2025-07-05 10:17:17] [Rank 0] step:181/10000 train_time:14561ms step_avg:80.45ms +[2025-07-05 10:17:19] [Rank 0] step:201/10000 train_time:16423ms step_avg:81.70ms +[2025-07-05 10:17:19] [Rank 0] step:201/10000 train_time:16423ms step_avg:81.70ms +[2025-07-05 10:17:20] [Rank 0] step:221/10000 train_time:17878ms step_avg:80.89ms +[2025-07-05 10:17:20] [Rank 0] step:221/10000 train_time:17878ms step_avg:80.89ms +[2025-07-05 10:17:22] [Rank 0] step:241/10000 train_time:19330ms step_avg:80.21ms +[2025-07-05 10:17:22] [Rank 0] step:241/10000 train_time:19330ms step_avg:80.21ms +[2025-07-05 10:17:23] [Rank 0] step:261/10000 train_time:20785ms step_avg:79.64ms +[2025-07-05 10:17:23] [Rank 0] step:261/10000 train_time:20785ms step_avg:79.64ms +[2025-07-05 10:17:25] [Rank 0] step:281/10000 train_time:22892ms step_avg:81.47ms +[2025-07-05 10:17:25] [Rank 0] step:281/10000 train_time:22892ms step_avg:81.47ms +[2025-07-05 10:17:27] [Rank 0] step:301/10000 train_time:24347ms step_avg:80.89ms +[2025-07-05 10:17:27] [Rank 0] step:301/10000 train_time:24347ms step_avg:80.89ms +[2025-07-05 10:17:28] [Rank 0] step:321/10000 train_time:25800ms step_avg:80.37ms +[2025-07-05 10:17:28] [Rank 0] step:321/10000 train_time:25800ms step_avg:80.37ms +[2025-07-05 10:17:30] [Rank 0] step:341/10000 train_time:27254ms step_avg:79.92ms +[2025-07-05 10:17:30] [Rank 0] step:341/10000 train_time:27254ms step_avg:79.92ms +[2025-07-05 10:17:32] [Rank 0] step:361/10000 train_time:28968ms step_avg:80.24ms +[2025-07-05 10:17:32] [Rank 0] step:361/10000 train_time:28968ms step_avg:80.24ms +[2025-07-05 10:17:33] [Rank 0] step:381/10000 train_time:30823ms step_avg:80.90ms +[2025-07-05 10:17:33] [Rank 0] step:381/10000 train_time:30823ms step_avg:80.90ms +[2025-07-05 10:17:35] [Rank 0] step:401/10000 train_time:32278ms step_avg:80.49ms +[2025-07-05 10:17:35] [Rank 0] step:401/10000 train_time:32278ms step_avg:80.49ms +[2025-07-05 10:17:36] [Rank 0] step:421/10000 train_time:33734ms step_avg:80.13ms 
+[2025-07-05 10:17:36] [Rank 0] step:421/10000 train_time:33734ms step_avg:80.13ms +[2025-07-05 10:17:38] [Rank 0] step:441/10000 train_time:35190ms step_avg:79.80ms +[2025-07-05 10:17:38] [Rank 0] step:441/10000 train_time:35190ms step_avg:79.80ms +[2025-07-05 10:17:40] [Rank 0] step:461/10000 train_time:37294ms step_avg:80.90ms +[2025-07-05 10:17:40] [Rank 0] step:461/10000 train_time:37294ms step_avg:80.90ms +[2025-07-05 10:17:41] [Rank 0] step:481/10000 train_time:38750ms step_avg:80.56ms +[2025-07-05 10:17:41] [Rank 0] step:481/10000 train_time:38750ms step_avg:80.56ms +[2025-07-05 10:17:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:17:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:17:44] [Rank 0] PRINT: step:500/10000 train_loss:9.6493 val_loss:8.5926 train_time:40203ms step_avg:80.41ms +[2025-07-05 10:17:44] [Rank 0] PRINT: step:500/10000 train_loss:9.6493 val_loss:8.5926 train_time:40203ms step_avg:80.41ms +[2025-07-05 10:17:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:17:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..68b30e4a950ee09ef85e3bfbbce74123975505d4 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "f37b2a7a-5571-4034-87a8-00821d2f4711", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/training_log_f37b2a7a-5571-4034-87a8-00821d2f4711.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/training_log_f37b2a7a-5571-4034-87a8-00821d2f4711.txt new file mode 100644 index 0000000000000000000000000000000000000000..2e8d56d3ccd7e461ae1ec2d16676d5075dd208a1 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/training_log_f37b2a7a-5571-4034-87a8-00821d2f4711.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:15:10] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:15:10 2025 --- +[2025-07-05 08:15:10] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:15:10 2025 --- +[2025-07-05 08:15:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:15:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:15:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:15:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:15:10] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 
08:15:10] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:15:10] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42 +[2025-07-05 08:15:10] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42 +[2025-07-05 08:15:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
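+# Accumulators for the average training loss reported at each validation step:
+# the loop below adds loss_train.detach() / args.train_seq_len to train_loss_sum
+# and increments train_step_count once per optimizer step; the running average is
+# all-reduced across ranks and both counters are reset inside the validation block.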
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
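+# Reference sketch (illustrative only; _powerlaw_groups_sketch is a hypothetical
+# name, not part of the run) of the class-group construction that the detailed
+# evaluation above relies on via generate_powerlaw_selection_counts with
+# M_FOR_POWERLAW = 11: group 0 holds one class selected 2**11 times, and each
+# group g >= 1 holds 2**(g-1) classes selected 2**(11-g) times each.
+def _powerlaw_groups_sketch(m: int = 11):
+    counts, groups = {}, []
+    class_id = 0
+    for group_id in range(m + 1):
+        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        for _ in range(num_classes):
+            counts[class_id] = samples_per_class
+            groups.append(group_id)
+            class_id += 1
+    return counts, groups
+
+# For m = 11 this gives 2048 classes and selection counts summing to 13312.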
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:15:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:15:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:15:10] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:15:10] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:15:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:15:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:15:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:15:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:15:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:15:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:15:13] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:15:13] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:15:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:15:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:15:13] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:15:13] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:15:13] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:15:13] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:15:13] [Rank 0] PRINT: Model returns: +[2025-07-05 08:15:13] [Rank 0] PRINT: Model returns: +[2025-07-05 08:15:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:15:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:15:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:15:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:15:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:15:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:15:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:15:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:15:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:15:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:15:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:15:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:15:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:15:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:15:13] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:15:13] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:16:20] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:16:20] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:16:20] [Rank 0] PRINT: Starting training... +[2025-07-05 08:16:20] [Rank 0] PRINT: Starting training... +[2025-07-05 08:16:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:16:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:16:28] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:16:28] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:16:30] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.63ms +[2025-07-05 08:16:30] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.63ms +[2025-07-05 08:16:31] [Rank 0] step:41/10000 train_time:3002ms step_avg:73.21ms +[2025-07-05 08:16:31] [Rank 0] step:41/10000 train_time:3002ms step_avg:73.21ms +[2025-07-05 08:16:33] [Rank 0] step:61/10000 train_time:4455ms step_avg:73.04ms +[2025-07-05 08:16:33] [Rank 0] step:61/10000 train_time:4455ms step_avg:73.04ms +[2025-07-05 08:16:34] [Rank 0] step:81/10000 train_time:5910ms step_avg:72.96ms +[2025-07-05 08:16:34] [Rank 0] step:81/10000 train_time:5910ms step_avg:72.96ms +[2025-07-05 08:16:36] [Rank 0] step:101/10000 train_time:7607ms step_avg:75.31ms +[2025-07-05 08:16:36] [Rank 0] step:101/10000 train_time:7607ms step_avg:75.31ms +[2025-07-05 08:16:37] [Rank 0] step:121/10000 train_time:9062ms step_avg:74.90ms +[2025-07-05 08:16:37] [Rank 0] step:121/10000 train_time:9062ms step_avg:74.90ms +[2025-07-05 08:16:39] [Rank 0] step:141/10000 train_time:10519ms step_avg:74.60ms +[2025-07-05 08:16:39] [Rank 0] step:141/10000 train_time:10519ms step_avg:74.60ms +[2025-07-05 08:16:40] [Rank 0] step:161/10000 train_time:11976ms step_avg:74.39ms +[2025-07-05 08:16:40] [Rank 0] step:161/10000 train_time:11976ms step_avg:74.39ms +[2025-07-05 08:16:42] [Rank 0] step:181/10000 train_time:13433ms step_avg:74.22ms +[2025-07-05 08:16:42] [Rank 0] step:181/10000 train_time:13433ms step_avg:74.22ms +[2025-07-05 08:16:44] [Rank 0] step:201/10000 train_time:15531ms step_avg:77.27ms +[2025-07-05 08:16:44] [Rank 0] step:201/10000 train_time:15531ms step_avg:77.27ms +[2025-07-05 08:16:45] [Rank 0] step:221/10000 train_time:16986ms step_avg:76.86ms +[2025-07-05 08:16:45] [Rank 0] step:221/10000 train_time:16986ms step_avg:76.86ms +[2025-07-05 08:16:47] [Rank 0] step:241/10000 train_time:18442ms step_avg:76.52ms +[2025-07-05 08:16:47] [Rank 0] step:241/10000 train_time:18442ms step_avg:76.52ms +[2025-07-05 08:16:48] [Rank 0] step:261/10000 train_time:19900ms step_avg:76.25ms +[2025-07-05 08:16:48] [Rank 0] step:261/10000 train_time:19900ms step_avg:76.25ms +[2025-07-05 08:16:50] [Rank 0] step:281/10000 train_time:21594ms step_avg:76.85ms +[2025-07-05 08:16:50] [Rank 0] step:281/10000 train_time:21594ms step_avg:76.85ms +[2025-07-05 08:16:51] [Rank 0] step:301/10000 train_time:23054ms step_avg:76.59ms +[2025-07-05 08:16:51] [Rank 0] step:301/10000 train_time:23054ms step_avg:76.59ms +[2025-07-05 08:16:53] [Rank 0] step:321/10000 train_time:24513ms step_avg:76.36ms +[2025-07-05 08:16:53] [Rank 0] step:321/10000 train_time:24513ms step_avg:76.36ms +[2025-07-05 08:16:54] [Rank 0] step:341/10000 train_time:25972ms step_avg:76.17ms +[2025-07-05 08:16:54] [Rank 0] step:341/10000 train_time:25972ms step_avg:76.17ms +[2025-07-05 08:16:56] [Rank 0] step:361/10000 train_time:27432ms step_avg:75.99ms +[2025-07-05 08:16:56] [Rank 0] step:361/10000 train_time:27432ms step_avg:75.99ms +[2025-07-05 08:16:58] [Rank 0] step:381/10000 train_time:29551ms step_avg:77.56ms +[2025-07-05 08:16:58] [Rank 0] step:381/10000 train_time:29551ms step_avg:77.56ms +[2025-07-05 08:16:59] [Rank 0] step:401/10000 train_time:31012ms step_avg:77.34ms +[2025-07-05 08:16:59] [Rank 0] step:401/10000 train_time:31012ms step_avg:77.34ms +[2025-07-05 08:17:01] [Rank 0] step:421/10000 train_time:32474ms step_avg:77.13ms 
+[2025-07-05 08:17:01] [Rank 0] step:421/10000 train_time:32474ms step_avg:77.13ms +[2025-07-05 08:17:02] [Rank 0] step:441/10000 train_time:34034ms step_avg:77.18ms +[2025-07-05 08:17:02] [Rank 0] step:441/10000 train_time:34034ms step_avg:77.18ms +[2025-07-05 08:17:04] [Rank 0] step:461/10000 train_time:36145ms step_avg:78.41ms +[2025-07-05 08:17:04] [Rank 0] step:461/10000 train_time:36145ms step_avg:78.41ms +[2025-07-05 08:17:06] [Rank 0] step:481/10000 train_time:37606ms step_avg:78.18ms +[2025-07-05 08:17:06] [Rank 0] step:481/10000 train_time:37606ms step_avg:78.18ms +[2025-07-05 08:17:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:17:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:17:08] [Rank 0] PRINT: step:500/10000 train_loss:8.7306 val_loss:7.1013 train_time:39067ms step_avg:78.13ms +[2025-07-05 08:17:08] [Rank 0] PRINT: step:500/10000 train_loss:8.7306 val_loss:7.1013 train_time:39067ms step_avg:78.13ms +[2025-07-05 08:17:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:17:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b650b3987e83c2f2f1f09e164f7b82d062ce90a7 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "10f9bbec-bf62-4667-920e-5b37450bed80", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/training_log_10f9bbec-bf62-4667-920e-5b37450bed80.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/training_log_10f9bbec-bf62-4667-920e-5b37450bed80.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d43f7f7b18de55b74365899386a43a6326a2433 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/training_log_10f9bbec-bf62-4667-920e-5b37450bed80.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:42:27] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:42:27 2025 --- +[2025-07-05 08:42:27] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:42:27 2025 --- +[2025-07-05 08:42:27] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:42:27] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:42:27] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:42:27] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:42:27] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 
08:42:27] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:42:27] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43 +[2025-07-05 08:42:27] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43 +[2025-07-05 08:42:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the log file once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
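# A minimal sketch (not part of the original script) of the stable-then-decay schedule
# defined by get_lr above, assuming this run's num_iterations=10000 and cooldown_frac=0.8
# (values from the config.json in this diff). The multiplier stays at 1.0 for the first
# 20% of steps, then decays linearly toward 0.1.
def sketch_lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)   # clamp training progress to [0, 1]
    if x < 1 - cooldown_frac:                        # stable phase (first 20% of steps here)
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)           # goes 1 -> 0 over the cooldown
    return w * 1.0 + (1 - w) * 0.1                   # linear decay from 1.0 down to 0.1

# e.g. step 0 or 2000 -> 1.0, step 6000 -> 0.55, step 10000 -> 0.1; in the training loop
# below each optimizer group then uses group["initial_lr"] * multiplier.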
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:42:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
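# A minimal sketch (not part of the original script) of how the CLI arguments map onto the
# run-directory naming built above; the resulting folder name matches the
# logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42 path of this diff's config.json.
# The launch line is an assumption (the script only reads RANK/LOCAL_RANK/WORLD_SIZE from
# the environment, which torchrun provides), and the script filename is illustrative:
#   torchrun --nproc_per_node=<num_gpus> train_qa.py \
#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0001 --seed 42
sketch_args = {"optimizer_mode": 0, "model_parameterization": "qkvo", "adam_lr": 0.0001, "seed": 42}
sketch_folder = "mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed}".format(**sketch_args)
assert sketch_folder == "mode_0_param_qkvo_lr_0.0001_seed_42"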
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
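# A minimal sketch (not part of the original script) of the attention-window schedule
# defined by get_window_size_blocks above: the window grows from 128 tokens (1 block of
# 128) at the start of training to 1792 tokens (14 blocks) at the end, assuming this
# run's num_iterations=10000.
def sketch_window_blocks(step, num_iterations=10000):
    x = min(max(step / num_iterations, 0.0), 1.0)   # training progress in [0, 1]
    target = 1728 * x
    window_size = 128                                # smallest allowed window
    while window_size < target:                      # round up to a multiple of 128
        window_size += 128
    return window_size // 128                        # number of 128-token blocks

# e.g. step 0 -> 1 block (128 tokens), step 5000 -> 7 blocks (896), step 10000 -> 14 blocks (1792)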
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:42:27] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:42:27] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:42:29] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:42:29] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:42:29] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:42:30] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 08:42:30] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:42:30] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:42:30] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:42:30] [Rank 0] PRINT: Model returns:
+[2025-07-05 08:42:30] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:42:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 08:42:30] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 08:42:30] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 08:42:30] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 08:42:30] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:42:30] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:42:30] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:43:35] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:43:35] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:43:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:43:42] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:43:44] [Rank 0] step:21/10000 train_time:1548ms step_avg:73.70ms
+[2025-07-05 08:43:45] [Rank 0] step:41/10000 train_time:3002ms step_avg:73.23ms
+[2025-07-05 08:43:47] [Rank 0] step:61/10000 train_time:4456ms step_avg:73.04ms
+[2025-07-05 08:43:48] [Rank 0] step:81/10000 train_time:5910ms step_avg:72.96ms
+[2025-07-05 08:43:50] [Rank 0] step:101/10000 train_time:8023ms step_avg:79.43ms
+[2025-07-05 08:43:52] [Rank 0] step:121/10000 train_time:9475ms step_avg:78.30ms
+[2025-07-05 08:43:53] [Rank 0] step:141/10000 train_time:10930ms step_avg:77.52ms
+[2025-07-05 08:43:55] [Rank 0] step:161/10000 train_time:12387ms step_avg:76.94ms
+[2025-07-05 08:43:57] [Rank 0] step:181/10000 train_time:13841ms step_avg:76.47ms
+[2025-07-05 08:43:58] [Rank 0] step:201/10000 train_time:15947ms step_avg:79.34ms
+[2025-07-05 08:44:00] [Rank 0] step:221/10000 train_time:17402ms step_avg:78.74ms
+[2025-07-05 08:44:01] [Rank 0] step:241/10000 train_time:18858ms step_avg:78.25ms
+[2025-07-05 08:44:03] [Rank 0] step:261/10000 train_time:20315ms step_avg:77.84ms
+[2025-07-05 08:44:04] [Rank 0] step:281/10000 train_time:22010ms step_avg:78.33ms
+[2025-07-05 08:44:06] [Rank 0] step:301/10000 train_time:23469ms step_avg:77.97ms
+[2025-07-05 08:44:07] [Rank 0] step:321/10000 train_time:24931ms step_avg:77.67ms
+[2025-07-05 08:44:09] [Rank 0] step:341/10000 train_time:26392ms step_avg:77.40ms
+[2025-07-05 08:44:11] [Rank 0] step:361/10000 train_time:27903ms step_avg:77.29ms
+[2025-07-05 08:44:12] [Rank 0] step:381/10000 train_time:29968ms step_avg:78.66ms
+[2025-07-05 08:44:14] [Rank 0] step:401/10000 train_time:31428ms step_avg:78.37ms
+[2025-07-05 08:44:15] [Rank 0] step:421/10000 train_time:32890ms step_avg:78.12ms +[2025-07-05 08:44:17] [Rank 0] step:441/10000 train_time:34351ms step_avg:77.89ms +[2025-07-05 08:44:17] [Rank 0] step:441/10000 train_time:34351ms step_avg:77.89ms +[2025-07-05 08:44:19] [Rank 0] step:461/10000 train_time:36466ms step_avg:79.10ms +[2025-07-05 08:44:19] [Rank 0] step:461/10000 train_time:36466ms step_avg:79.10ms +[2025-07-05 08:44:20] [Rank 0] step:481/10000 train_time:37928ms step_avg:78.85ms +[2025-07-05 08:44:20] [Rank 0] step:481/10000 train_time:37928ms step_avg:78.85ms +[2025-07-05 08:44:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:44:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:44:23] [Rank 0] PRINT: step:500/10000 train_loss:8.7308 val_loss:7.1022 train_time:39391ms step_avg:78.78ms +[2025-07-05 08:44:23] [Rank 0] PRINT: step:500/10000 train_loss:8.7308 val_loss:7.1022 train_time:39391ms step_avg:78.78ms +[2025-07-05 08:44:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:44:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3596e541944d4c8b3639f52d342fdb2a19d7f073 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "90bd20f3-2faf-48d6-8ef5-5be1824fef14", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44/training_log_90bd20f3-2faf-48d6-8ef5-5be1824fef14.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44/training_log_90bd20f3-2faf-48d6-8ef5-5be1824fef14.txt new file mode 100644 index 0000000000000000000000000000000000000000..f73ad2d5ee694d72a0b8eb37e9aa89f7301809c1 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44/training_log_90bd20f3-2faf-48d6-8ef5-5be1824fef14.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:10:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:10:03 2025 --- +[2025-07-05 09:10:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:10:03 2025 --- +[2025-07-05 09:10:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:10:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:10:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:10:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:10:03] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 
09:10:03] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:10:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44 +[2025-07-05 09:10:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_44 +[2025-07-05 09:10:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:10:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:10:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:10:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:10:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:10:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:10:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:10:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:10:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:10:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:10:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:10:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:10:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:10:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:10:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:10:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:10:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:10:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:10:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:10:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:10:07] [Rank 0] PRINT: Model returns: +[2025-07-05 09:10:07] [Rank 0] PRINT: Model returns: +[2025-07-05 09:10:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:10:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:10:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:10:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:10:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:10:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:10:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:10:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:10:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:10:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:10:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:10:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:10:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:10:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:10:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:10:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:11:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:11:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:11:12] [Rank 0] PRINT: Starting training... +[2025-07-05 09:11:12] [Rank 0] PRINT: Starting training... +[2025-07-05 09:11:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:11:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
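That divisibility warning follows from the configured sizes: val_batch_size = world_size x val_seq_len = 4 x 65536 = 262144 tokens (implying the run used 4 ranks), and val_num_steps = 1966080 // 262144 = 7, so each evaluation pass consumes 7 x 262144 = 1,835,008 tokens and skips the remaining 131,072.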
+[2025-07-05 09:11:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:11:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:11:21] [Rank 0] step:21/10000 train_time:1751ms step_avg:83.39ms +[2025-07-05 09:11:21] [Rank 0] step:21/10000 train_time:1751ms step_avg:83.39ms +[2025-07-05 09:11:22] [Rank 0] step:41/10000 train_time:3203ms step_avg:78.13ms +[2025-07-05 09:11:22] [Rank 0] step:41/10000 train_time:3203ms step_avg:78.13ms +[2025-07-05 09:11:24] [Rank 0] step:61/10000 train_time:4659ms step_avg:76.37ms +[2025-07-05 09:11:24] [Rank 0] step:61/10000 train_time:4659ms step_avg:76.37ms +[2025-07-05 09:11:25] [Rank 0] step:81/10000 train_time:6112ms step_avg:75.45ms +[2025-07-05 09:11:25] [Rank 0] step:81/10000 train_time:6112ms step_avg:75.45ms +[2025-07-05 09:11:27] [Rank 0] step:101/10000 train_time:7805ms step_avg:77.27ms +[2025-07-05 09:11:27] [Rank 0] step:101/10000 train_time:7805ms step_avg:77.27ms +[2025-07-05 09:11:28] [Rank 0] step:121/10000 train_time:9258ms step_avg:76.51ms +[2025-07-05 09:11:28] [Rank 0] step:121/10000 train_time:9258ms step_avg:76.51ms +[2025-07-05 09:11:30] [Rank 0] step:141/10000 train_time:10713ms step_avg:75.98ms +[2025-07-05 09:11:30] [Rank 0] step:141/10000 train_time:10713ms step_avg:75.98ms +[2025-07-05 09:11:31] [Rank 0] step:161/10000 train_time:12167ms step_avg:75.57ms +[2025-07-05 09:11:31] [Rank 0] step:161/10000 train_time:12167ms step_avg:75.57ms +[2025-07-05 09:11:33] [Rank 0] step:181/10000 train_time:13875ms step_avg:76.66ms +[2025-07-05 09:11:33] [Rank 0] step:181/10000 train_time:13875ms step_avg:76.66ms +[2025-07-05 09:11:35] [Rank 0] step:201/10000 train_time:15741ms step_avg:78.31ms +[2025-07-05 09:11:35] [Rank 0] step:201/10000 train_time:15741ms step_avg:78.31ms +[2025-07-05 09:11:36] [Rank 0] step:221/10000 train_time:17195ms step_avg:77.81ms +[2025-07-05 09:11:36] [Rank 0] step:221/10000 train_time:17195ms step_avg:77.81ms +[2025-07-05 09:11:38] [Rank 0] step:241/10000 train_time:18650ms step_avg:77.39ms +[2025-07-05 09:11:38] [Rank 0] step:241/10000 train_time:18650ms step_avg:77.39ms +[2025-07-05 09:11:39] [Rank 0] step:261/10000 train_time:20108ms step_avg:77.04ms +[2025-07-05 09:11:39] [Rank 0] step:261/10000 train_time:20108ms step_avg:77.04ms +[2025-07-05 09:11:41] [Rank 0] step:281/10000 train_time:21801ms step_avg:77.58ms +[2025-07-05 09:11:41] [Rank 0] step:281/10000 train_time:21801ms step_avg:77.58ms +[2025-07-05 09:11:42] [Rank 0] step:301/10000 train_time:23260ms step_avg:77.28ms +[2025-07-05 09:11:42] [Rank 0] step:301/10000 train_time:23260ms step_avg:77.28ms +[2025-07-05 09:11:44] [Rank 0] step:321/10000 train_time:24729ms step_avg:77.04ms +[2025-07-05 09:11:44] [Rank 0] step:321/10000 train_time:24729ms step_avg:77.04ms +[2025-07-05 09:11:45] [Rank 0] step:341/10000 train_time:26188ms step_avg:76.80ms +[2025-07-05 09:11:45] [Rank 0] step:341/10000 train_time:26188ms step_avg:76.80ms +[2025-07-05 09:11:47] [Rank 0] step:361/10000 train_time:27646ms step_avg:76.58ms +[2025-07-05 09:11:47] [Rank 0] step:361/10000 train_time:27646ms step_avg:76.58ms +[2025-07-05 09:11:48] [Rank 0] step:381/10000 train_time:29343ms step_avg:77.02ms +[2025-07-05 09:11:48] [Rank 0] step:381/10000 train_time:29343ms step_avg:77.02ms +[2025-07-05 09:11:50] [Rank 0] step:401/10000 train_time:30802ms step_avg:76.81ms +[2025-07-05 09:11:50] [Rank 0] step:401/10000 train_time:30802ms step_avg:76.81ms +[2025-07-05 09:11:51] [Rank 0] step:421/10000 train_time:32262ms step_avg:76.63ms 
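The intermediate step lines are printed every 20 optimizer steps (hence 21, 41, 61, ...), and step_avg is simply cumulative train_time divided by the printed step index; e.g. at step 421 above, 32262 ms / 421 = 76.63 ms.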
+[2025-07-05 09:11:51] [Rank 0] step:421/10000 train_time:32262ms step_avg:76.63ms +[2025-07-05 09:11:53] [Rank 0] step:441/10000 train_time:33726ms step_avg:76.48ms +[2025-07-05 09:11:53] [Rank 0] step:441/10000 train_time:33726ms step_avg:76.48ms +[2025-07-05 09:11:54] [Rank 0] step:461/10000 train_time:35422ms step_avg:76.84ms +[2025-07-05 09:11:54] [Rank 0] step:461/10000 train_time:35422ms step_avg:76.84ms +[2025-07-05 09:11:56] [Rank 0] step:481/10000 train_time:36887ms step_avg:76.69ms +[2025-07-05 09:11:56] [Rank 0] step:481/10000 train_time:36887ms step_avg:76.69ms +[2025-07-05 09:11:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:11:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:11:58] [Rank 0] PRINT: step:500/10000 train_loss:8.7305 val_loss:7.1014 train_time:38505ms step_avg:77.01ms +[2025-07-05 09:11:58] [Rank 0] PRINT: step:500/10000 train_loss:8.7305 val_loss:7.1014 train_time:38505ms step_avg:77.01ms +[2025-07-05 09:11:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:11:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..658e5dc70fcd2dc8f6b00a75bab9c2dd280e7872 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e51a8350-9205-47a5-9f4a-ee72a81b4dd1", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/training_log_e51a8350-9205-47a5-9f4a-ee72a81b4dd1.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/training_log_e51a8350-9205-47a5-9f4a-ee72a81b4dd1.txt new file mode 100644 index 0000000000000000000000000000000000000000..410a79efeadc6c26bffce82835602f52aabce2b7 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/training_log_e51a8350-9205-47a5-9f4a-ee72a81b4dd1.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:37:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:37:03 2025 --- +[2025-07-05 09:37:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:37:03 2025 --- +[2025-07-05 09:37:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:37:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:37:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:37:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:37:03] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 
09:37:03] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:37:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45 +[2025-07-05 09:37:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45 +[2025-07-05 09:37:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
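# Worked example of the attention window schedule above (a sketch; values follow from
# next_multiple_of_n(1728 * x, n=128) with num_iterations=10000 and a 128-token block size):
#   step 0     -> window_size = 128 tokens  -> 1 block
#   step 5000  -> 1728 * 0.5 = 864, rounded up to 896 tokens  -> 7 blocks
#   step 10000 -> 1728, rounded up to 1792 tokens             -> 14 blocks
# i.e. the sliding attention window widens from 1 to 14 blocks of 128 tokens over training.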
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:37:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
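+# Each run gets its own folder under logs_bios/qa_0704/, named from the optimizer mode,
+# parameterization, Adam LR, and seed (e.g. mode_0_param_qkvo_lr_0.0001_seed_42). The
+# master process creates it below and writes config.json, training_log_<uuid>.txt,
+# the evaluation plots, and (when save_checkpoint is enabled) a checkpoints/ subfolder.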
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
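+# train_loss_sum (above) and train_step_count (below) accumulate the training loss
+# (scaled by 1/train_seq_len) and the number of optimizer steps between validation points;
+# the validation block averages them, all-reduces across ranks, and then resets both.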
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:37:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:37:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:37:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:37:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:37:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:37:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:37:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:37:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:37:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:37:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:37:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:37:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:37:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:37:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:37:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:37:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:37:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:37:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:37:07] [Rank 0] PRINT: Model returns: +[2025-07-05 09:37:07] [Rank 0] PRINT: Model returns: +[2025-07-05 09:37:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:37:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:37:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:37:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:37:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:37:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:37:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:37:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:37:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:37:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:37:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:37:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:37:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:37:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:37:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:37:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:38:13] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:38:13] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:38:13] [Rank 0] PRINT: Starting training... +[2025-07-05 09:38:13] [Rank 0] PRINT: Starting training... +[2025-07-05 09:38:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:38:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
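The divisibility warning logged above is easy to verify: with val_seq_len = 65536, the reported val_batch_size of 262144 implies 4 ranks, and 1966080 validation tokens then cover only 7 full validation steps. A small arithmetic check (a sketch using the same integer division as the logged validation loop):

val_tokens = 1966080
val_batch_size = 262144                       # world_size * val_seq_len, as reported in the log
val_num_steps = val_tokens // val_batch_size  # 7 full validation steps
leftover = val_tokens - val_num_steps * val_batch_size
print(val_num_steps, leftover)                # 7 131072 -> 131072 tokens per validation pass are skipped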
+[2025-07-05 09:38:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:38:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:38:22] [Rank 0] step:21/10000 train_time:1746ms step_avg:83.17ms +[2025-07-05 09:38:22] [Rank 0] step:21/10000 train_time:1746ms step_avg:83.17ms +[2025-07-05 09:38:24] [Rank 0] step:41/10000 train_time:3205ms step_avg:78.17ms +[2025-07-05 09:38:24] [Rank 0] step:41/10000 train_time:3205ms step_avg:78.17ms +[2025-07-05 09:38:25] [Rank 0] step:61/10000 train_time:4656ms step_avg:76.33ms +[2025-07-05 09:38:25] [Rank 0] step:61/10000 train_time:4656ms step_avg:76.33ms +[2025-07-05 09:38:27] [Rank 0] step:81/10000 train_time:6114ms step_avg:75.48ms +[2025-07-05 09:38:27] [Rank 0] step:81/10000 train_time:6114ms step_avg:75.48ms +[2025-07-05 09:38:28] [Rank 0] step:101/10000 train_time:7607ms step_avg:75.31ms +[2025-07-05 09:38:28] [Rank 0] step:101/10000 train_time:7607ms step_avg:75.31ms +[2025-07-05 09:38:30] [Rank 0] step:121/10000 train_time:9061ms step_avg:74.89ms +[2025-07-05 09:38:30] [Rank 0] step:121/10000 train_time:9061ms step_avg:74.89ms +[2025-07-05 09:38:31] [Rank 0] step:141/10000 train_time:10520ms step_avg:74.61ms +[2025-07-05 09:38:31] [Rank 0] step:141/10000 train_time:10520ms step_avg:74.61ms +[2025-07-05 09:38:33] [Rank 0] step:161/10000 train_time:11973ms step_avg:74.37ms +[2025-07-05 09:38:33] [Rank 0] step:161/10000 train_time:11973ms step_avg:74.37ms +[2025-07-05 09:38:34] [Rank 0] step:181/10000 train_time:13429ms step_avg:74.19ms +[2025-07-05 09:38:34] [Rank 0] step:181/10000 train_time:13429ms step_avg:74.19ms +[2025-07-05 09:38:36] [Rank 0] step:201/10000 train_time:14918ms step_avg:74.22ms +[2025-07-05 09:38:36] [Rank 0] step:201/10000 train_time:14918ms step_avg:74.22ms +[2025-07-05 09:38:37] [Rank 0] step:221/10000 train_time:16375ms step_avg:74.10ms +[2025-07-05 09:38:37] [Rank 0] step:221/10000 train_time:16375ms step_avg:74.10ms +[2025-07-05 09:38:38] [Rank 0] step:241/10000 train_time:17834ms step_avg:74.00ms +[2025-07-05 09:38:38] [Rank 0] step:241/10000 train_time:17834ms step_avg:74.00ms +[2025-07-05 09:38:40] [Rank 0] step:261/10000 train_time:19288ms step_avg:73.90ms +[2025-07-05 09:38:40] [Rank 0] step:261/10000 train_time:19288ms step_avg:73.90ms +[2025-07-05 09:38:42] [Rank 0] step:281/10000 train_time:20985ms step_avg:74.68ms +[2025-07-05 09:38:42] [Rank 0] step:281/10000 train_time:20985ms step_avg:74.68ms +[2025-07-05 09:38:43] [Rank 0] step:301/10000 train_time:22443ms step_avg:74.56ms +[2025-07-05 09:38:43] [Rank 0] step:301/10000 train_time:22443ms step_avg:74.56ms +[2025-07-05 09:38:45] [Rank 0] step:321/10000 train_time:23903ms step_avg:74.47ms +[2025-07-05 09:38:45] [Rank 0] step:321/10000 train_time:23903ms step_avg:74.47ms +[2025-07-05 09:38:46] [Rank 0] step:341/10000 train_time:25463ms step_avg:74.67ms +[2025-07-05 09:38:46] [Rank 0] step:341/10000 train_time:25463ms step_avg:74.67ms +[2025-07-05 09:38:48] [Rank 0] step:361/10000 train_time:27022ms step_avg:74.85ms +[2025-07-05 09:38:48] [Rank 0] step:361/10000 train_time:27022ms step_avg:74.85ms +[2025-07-05 09:38:49] [Rank 0] step:381/10000 train_time:28717ms step_avg:75.37ms +[2025-07-05 09:38:49] [Rank 0] step:381/10000 train_time:28717ms step_avg:75.37ms +[2025-07-05 09:38:51] [Rank 0] step:401/10000 train_time:30177ms step_avg:75.25ms +[2025-07-05 09:38:51] [Rank 0] step:401/10000 train_time:30177ms step_avg:75.25ms +[2025-07-05 09:38:52] [Rank 0] step:421/10000 train_time:31641ms step_avg:75.16ms 
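The step_avg column in these lines is simply cumulative train_time divided by the number of completed steps (the logged loop prints approx_total_training_time_ms / (step + 1)). A quick check against the step 21 entry is sketched below; the small difference from the printed 83.17 ms comes from train_time being rounded to whole milliseconds before printing.

train_time_ms = 1746                # rounded cumulative time printed at step 21
steps_done = 21
print(train_time_ms / steps_done)   # ~83.14 ms, consistent with the logged step_avg of 83.17 ms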
+[2025-07-05 09:38:52] [Rank 0] step:421/10000 train_time:31641ms step_avg:75.16ms +[2025-07-05 09:38:54] [Rank 0] step:441/10000 train_time:33101ms step_avg:75.06ms +[2025-07-05 09:38:54] [Rank 0] step:441/10000 train_time:33101ms step_avg:75.06ms +[2025-07-05 09:38:56] [Rank 0] step:461/10000 train_time:35221ms step_avg:76.40ms +[2025-07-05 09:38:56] [Rank 0] step:461/10000 train_time:35221ms step_avg:76.40ms +[2025-07-05 09:38:57] [Rank 0] step:481/10000 train_time:36685ms step_avg:76.27ms +[2025-07-05 09:38:57] [Rank 0] step:481/10000 train_time:36685ms step_avg:76.27ms +[2025-07-05 09:38:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:38:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:39:00] [Rank 0] PRINT: step:500/10000 train_loss:8.7313 val_loss:7.1018 train_time:38147ms step_avg:76.29ms +[2025-07-05 09:39:00] [Rank 0] PRINT: step:500/10000 train_loss:8.7313 val_loss:7.1018 train_time:38147ms step_avg:76.29ms +[2025-07-05 09:39:00] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:39:00] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0020f5b925f8f950edbb6e419eba6d06b6ae815b --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "b8bfd9ed-d693-408c-beb1-933c06f91bfd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46/training_log_b8bfd9ed-d693-408c-beb1-933c06f91bfd.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46/training_log_b8bfd9ed-d693-408c-beb1-933c06f91bfd.txt new file mode 100644 index 0000000000000000000000000000000000000000..727c0e1dedb450ef46c008790112e1c52fce5fba --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46/training_log_b8bfd9ed-d693-408c-beb1-933c06f91bfd.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:04:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:04:20 2025 --- +[2025-07-05 10:04:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:04:20 2025 --- +[2025-07-05 10:04:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:04:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:04:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:04:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:04:20] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 
10:04:20] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:04:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46 +[2025-07-05 10:04:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_46 +[2025-07-05 10:04:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
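# A hedged sanity-check sketch (not part of the logged run script): with num_iterations = 10000
# and cooldown_frac = 0.8 from the config, the get_lr schedule defined above keeps the multiplier
# at 1.0 for the first 20% of training and then decays it linearly toward 0.1; each optimizer
# group's lr is group["initial_lr"] times this value, as applied in the training loop below.
def _lr_multiplier_example(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / cooldown_frac
    return w * 1.0 + (1 - w) * 0.1
# _lr_multiplier_example(0) == 1.0, _lr_multiplier_example(6000) == 0.55, _lr_multiplier_example(10000) == 0.1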
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:04:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
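# --- Annotation added for clarity; not part of the logged script or its output ---
# Rough token budget implied by the hyperparameters above: each training step
# consumes world_size * train_seq_len tokens (the train loader below is built
# with batch_size = world_size * args.train_seq_len). The world size comes from
# the environment; the later warning showing val_batch_size = 262144 (= 4 * 65536)
# suggests this run used 4 ranks, in which case:
#   tokens per step    = 4 * 12288       = 49,152
#   tokens over 10000  = 10000 * 49,152 ~= 4.9e8 (~0.49B tokens)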
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
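# --- Annotation added for clarity; not part of the logged script or its output ---
# Besides the get_lr() multiplier applied to every param group, the training
# loop below ramps the Muon momentum over the first 300 steps:
#   frac = min(step / 300, 1)
#   momentum = (1 - frac) * 0.85 + frac * 0.95
# giving 0.85 at step 0, 0.90 at step 150, and a constant 0.95 from step 300 on.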
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:04:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:04:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:04:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:04:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:04:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:04:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:04:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:04:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:04:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:04:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:04:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:04:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:04:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:04:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:04:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:04:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:04:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:04:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:04:24] [Rank 0] PRINT: Model returns: +[2025-07-05 10:04:24] [Rank 0] PRINT: Model returns: +[2025-07-05 10:04:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:04:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:04:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:04:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:04:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:04:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:04:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:04:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:04:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:04:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:04:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:04:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:04:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:04:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:04:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:04:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:05:30] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:05:30] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:05:30] [Rank 0] PRINT: Starting training... +[2025-07-05 10:05:30] [Rank 0] PRINT: Starting training... +[2025-07-05 10:05:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:05:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:05:37] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:05:37] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:05:39] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.49ms +[2025-07-05 10:05:39] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.49ms +[2025-07-05 10:05:40] [Rank 0] step:41/10000 train_time:3208ms step_avg:78.24ms +[2025-07-05 10:05:40] [Rank 0] step:41/10000 train_time:3208ms step_avg:78.24ms +[2025-07-05 10:05:42] [Rank 0] step:61/10000 train_time:4664ms step_avg:76.46ms +[2025-07-05 10:05:42] [Rank 0] step:61/10000 train_time:4664ms step_avg:76.46ms +[2025-07-05 10:05:43] [Rank 0] step:81/10000 train_time:6382ms step_avg:78.78ms +[2025-07-05 10:05:43] [Rank 0] step:81/10000 train_time:6382ms step_avg:78.78ms +[2025-07-05 10:05:46] [Rank 0] step:101/10000 train_time:8693ms step_avg:86.07ms +[2025-07-05 10:05:46] [Rank 0] step:101/10000 train_time:8693ms step_avg:86.07ms +[2025-07-05 10:05:47] [Rank 0] step:121/10000 train_time:10148ms step_avg:83.87ms +[2025-07-05 10:05:47] [Rank 0] step:121/10000 train_time:10148ms step_avg:83.87ms +[2025-07-05 10:05:48] [Rank 0] step:141/10000 train_time:11605ms step_avg:82.30ms +[2025-07-05 10:05:48] [Rank 0] step:141/10000 train_time:11605ms step_avg:82.30ms +[2025-07-05 10:05:50] [Rank 0] step:161/10000 train_time:13062ms step_avg:81.13ms +[2025-07-05 10:05:50] [Rank 0] step:161/10000 train_time:13062ms step_avg:81.13ms +[2025-07-05 10:05:52] [Rank 0] step:181/10000 train_time:15192ms step_avg:83.94ms +[2025-07-05 10:05:52] [Rank 0] step:181/10000 train_time:15192ms step_avg:83.94ms +[2025-07-05 10:05:54] [Rank 0] step:201/10000 train_time:16630ms step_avg:82.73ms +[2025-07-05 10:05:54] [Rank 0] step:201/10000 train_time:16630ms step_avg:82.73ms +[2025-07-05 10:05:55] [Rank 0] step:221/10000 train_time:18086ms step_avg:81.84ms +[2025-07-05 10:05:55] [Rank 0] step:221/10000 train_time:18086ms step_avg:81.84ms +[2025-07-05 10:05:56] [Rank 0] step:241/10000 train_time:19546ms step_avg:81.10ms +[2025-07-05 10:05:56] [Rank 0] step:241/10000 train_time:19546ms step_avg:81.10ms +[2025-07-05 10:05:58] [Rank 0] step:261/10000 train_time:21003ms step_avg:80.47ms +[2025-07-05 10:05:58] [Rank 0] step:261/10000 train_time:21003ms step_avg:80.47ms +[2025-07-05 10:06:00] [Rank 0] step:281/10000 train_time:22699ms step_avg:80.78ms +[2025-07-05 10:06:00] [Rank 0] step:281/10000 train_time:22699ms step_avg:80.78ms +[2025-07-05 10:06:01] [Rank 0] step:301/10000 train_time:24157ms step_avg:80.26ms +[2025-07-05 10:06:01] [Rank 0] step:301/10000 train_time:24157ms step_avg:80.26ms +[2025-07-05 10:06:03] [Rank 0] step:321/10000 train_time:25619ms step_avg:79.81ms +[2025-07-05 10:06:03] [Rank 0] step:321/10000 train_time:25619ms step_avg:79.81ms +[2025-07-05 10:06:04] [Rank 0] step:341/10000 train_time:27080ms step_avg:79.41ms +[2025-07-05 10:06:04] [Rank 0] step:341/10000 train_time:27080ms step_avg:79.41ms +[2025-07-05 10:06:06] [Rank 0] step:361/10000 train_time:28540ms step_avg:79.06ms +[2025-07-05 10:06:06] [Rank 0] step:361/10000 train_time:28540ms step_avg:79.06ms +[2025-07-05 10:06:07] [Rank 0] step:381/10000 train_time:30240ms step_avg:79.37ms +[2025-07-05 10:06:07] [Rank 0] step:381/10000 train_time:30240ms step_avg:79.37ms +[2025-07-05 10:06:09] [Rank 0] step:401/10000 train_time:31701ms step_avg:79.06ms +[2025-07-05 10:06:09] [Rank 0] step:401/10000 train_time:31701ms step_avg:79.06ms +[2025-07-05 10:06:10] [Rank 0] step:421/10000 train_time:33164ms step_avg:78.78ms 
+[2025-07-05 10:06:10] [Rank 0] step:421/10000 train_time:33164ms step_avg:78.78ms +[2025-07-05 10:06:12] [Rank 0] step:441/10000 train_time:34630ms step_avg:78.53ms +[2025-07-05 10:06:12] [Rank 0] step:441/10000 train_time:34630ms step_avg:78.53ms +[2025-07-05 10:06:13] [Rank 0] step:461/10000 train_time:36328ms step_avg:78.80ms +[2025-07-05 10:06:13] [Rank 0] step:461/10000 train_time:36328ms step_avg:78.80ms +[2025-07-05 10:06:15] [Rank 0] step:481/10000 train_time:37792ms step_avg:78.57ms +[2025-07-05 10:06:15] [Rank 0] step:481/10000 train_time:37792ms step_avg:78.57ms +[2025-07-05 10:06:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:06:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:06:17] [Rank 0] PRINT: step:500/10000 train_loss:8.7311 val_loss:7.1010 train_time:39256ms step_avg:78.51ms +[2025-07-05 10:06:17] [Rank 0] PRINT: step:500/10000 train_loss:8.7311 val_loss:7.1010 train_time:39256ms step_avg:78.51ms +[2025-07-05 10:06:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:06:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2f348058cf295dd41d6695950ff38cb0efc6f28d --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "5c689889-2122-4cc3-bd3a-49defc27e9e1", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47/training_log_5c689889-2122-4cc3-bd3a-49defc27e9e1.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47/training_log_5c689889-2122-4cc3-bd3a-49defc27e9e1.txt new file mode 100644 index 0000000000000000000000000000000000000000..120237d67120770ab8d7cd66576a9b4de96c40f4 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47/training_log_5c689889-2122-4cc3-bd3a-49defc27e9e1.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:23:37] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:23:37 2025 --- +[2025-07-05 08:23:37] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:23:37 2025 --- +[2025-07-05 08:23:37] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:23:37] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:23:37] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:23:37] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:23:37] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 
08:23:37] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:23:37] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47 +[2025-07-05 08:23:37] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_47 +[2025-07-05 08:23:37] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the log file once (guarded so runs without a logfile don't fail) + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:23:37] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
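+# Illustrative note (script name and GPU count below are placeholders): the RANK, LOCAL_RANK and
+# WORLD_SIZE variables read above are expected to be set by the launcher, e.g.
+#   torchrun --standalone --nproc_per_node=<num_gpus> <this_script.py> \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+# If they are unset, the os.environ.get(...) defaults fall back to rank 0 / world size 1.
+# With the example flags above, the run-directory f-string resolves to a folder named
+# mode_0_param_qkvo_lr_0.0001_seed_42.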
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
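+# Schedule sketch for the loop below (values derived from get_lr / get_window_size_blocks above):
+# with num_iterations=10000 and cooldown_frac=0.8 the LR multiplier stays at 1.0 for roughly the
+# first 2000 steps (the first 1 - cooldown_frac of training) and then decays linearly, reaching
+# about 0.55 at step 6000 and 0.1 at the final step. The sliding attention window grows from
+# 128 tokens (1 block) at step 0 to 1792 tokens (14 blocks of 128) near the end of training.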
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:23:37] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:23:37] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:23:37] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:23:37] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:23:39] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:23:39] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:23:39] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:23:39] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:23:39] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:23:39] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:23:40] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:23:40] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:23:40] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:23:40] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:23:40] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:23:40] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:23:40] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:23:40] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:23:40] [Rank 0] PRINT: Model returns: +[2025-07-05 08:23:40] [Rank 0] PRINT: Model returns: +[2025-07-05 08:23:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:23:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:23:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:23:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:23:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:23:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:23:40] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:23:40] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:23:40] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:23:40] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:23:40] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:23:40] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:23:40] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:23:40] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:23:40] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:23:40] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:24:48] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:24:48] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:24:48] [Rank 0] PRINT: Starting training... +[2025-07-05 08:24:48] [Rank 0] PRINT: Starting training... +[2025-07-05 08:24:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:24:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:24:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:24:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:24:58] [Rank 0] step:21/10000 train_time:1754ms step_avg:83.53ms +[2025-07-05 08:24:58] [Rank 0] step:21/10000 train_time:1754ms step_avg:83.53ms +[2025-07-05 08:25:00] [Rank 0] step:41/10000 train_time:3210ms step_avg:78.29ms +[2025-07-05 08:25:00] [Rank 0] step:41/10000 train_time:3210ms step_avg:78.29ms +[2025-07-05 08:25:01] [Rank 0] step:61/10000 train_time:4661ms step_avg:76.40ms +[2025-07-05 08:25:01] [Rank 0] step:61/10000 train_time:4661ms step_avg:76.40ms +[2025-07-05 08:25:03] [Rank 0] step:81/10000 train_time:6114ms step_avg:75.48ms +[2025-07-05 08:25:03] [Rank 0] step:81/10000 train_time:6114ms step_avg:75.48ms +[2025-07-05 08:25:05] [Rank 0] step:101/10000 train_time:8219ms step_avg:81.38ms +[2025-07-05 08:25:05] [Rank 0] step:101/10000 train_time:8219ms step_avg:81.38ms +[2025-07-05 08:25:06] [Rank 0] step:121/10000 train_time:9675ms step_avg:79.96ms +[2025-07-05 08:25:06] [Rank 0] step:121/10000 train_time:9675ms step_avg:79.96ms +[2025-07-05 08:25:08] [Rank 0] step:141/10000 train_time:11129ms step_avg:78.93ms +[2025-07-05 08:25:08] [Rank 0] step:141/10000 train_time:11129ms step_avg:78.93ms +[2025-07-05 08:25:09] [Rank 0] step:161/10000 train_time:12583ms step_avg:78.16ms +[2025-07-05 08:25:09] [Rank 0] step:161/10000 train_time:12583ms step_avg:78.16ms +[2025-07-05 08:25:11] [Rank 0] step:181/10000 train_time:14716ms step_avg:81.30ms +[2025-07-05 08:25:11] [Rank 0] step:181/10000 train_time:14716ms step_avg:81.30ms +[2025-07-05 08:25:13] [Rank 0] step:201/10000 train_time:16152ms step_avg:80.36ms +[2025-07-05 08:25:13] [Rank 0] step:201/10000 train_time:16152ms step_avg:80.36ms +[2025-07-05 08:25:14] [Rank 0] step:221/10000 train_time:17606ms step_avg:79.67ms +[2025-07-05 08:25:14] [Rank 0] step:221/10000 train_time:17606ms step_avg:79.67ms +[2025-07-05 08:25:16] [Rank 0] step:241/10000 train_time:19063ms step_avg:79.10ms +[2025-07-05 08:25:16] [Rank 0] step:241/10000 train_time:19063ms step_avg:79.10ms +[2025-07-05 08:25:17] [Rank 0] step:261/10000 train_time:20522ms step_avg:78.63ms +[2025-07-05 08:25:17] [Rank 0] step:261/10000 train_time:20522ms step_avg:78.63ms +[2025-07-05 08:25:19] [Rank 0] step:281/10000 train_time:22651ms step_avg:80.61ms +[2025-07-05 08:25:19] [Rank 0] step:281/10000 train_time:22651ms step_avg:80.61ms +[2025-07-05 08:25:21] [Rank 0] step:301/10000 train_time:24108ms step_avg:80.09ms +[2025-07-05 08:25:21] [Rank 0] step:301/10000 train_time:24108ms step_avg:80.09ms +[2025-07-05 08:25:22] [Rank 0] step:321/10000 train_time:25570ms step_avg:79.66ms +[2025-07-05 08:25:22] [Rank 0] step:321/10000 train_time:25570ms step_avg:79.66ms +[2025-07-05 08:25:24] [Rank 0] step:341/10000 train_time:27268ms step_avg:79.96ms +[2025-07-05 08:25:24] [Rank 0] step:341/10000 train_time:27268ms step_avg:79.96ms +[2025-07-05 08:25:26] [Rank 0] step:361/10000 train_time:28726ms step_avg:79.57ms +[2025-07-05 08:25:26] [Rank 0] step:361/10000 train_time:28726ms step_avg:79.57ms +[2025-07-05 08:25:27] [Rank 0] step:381/10000 train_time:30837ms step_avg:80.94ms +[2025-07-05 08:25:27] [Rank 0] step:381/10000 train_time:30837ms step_avg:80.94ms +[2025-07-05 08:25:29] [Rank 0] step:401/10000 train_time:32297ms step_avg:80.54ms +[2025-07-05 08:25:29] [Rank 0] step:401/10000 train_time:32297ms step_avg:80.54ms +[2025-07-05 08:25:30] [Rank 0] step:421/10000 train_time:33761ms step_avg:80.19ms 
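The step_avg column is simply the cumulative train_time divided by the printed step index; a quick check against the entries above (small differences come from the unrounded millisecond values used inside the script):

# (train_time_ms, printed_step) pairs copied from the log lines above
for t_ms, step in [(1754, 21), (3210, 41), (4661, 61)]:
    print(f"step {step}: {t_ms / step:.2f} ms/step")   # ~83.52, ~78.29, ~76.41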
+[2025-07-05 08:25:30] [Rank 0] step:421/10000 train_time:33761ms step_avg:80.19ms +[2025-07-05 08:25:32] [Rank 0] step:441/10000 train_time:35219ms step_avg:79.86ms +[2025-07-05 08:25:32] [Rank 0] step:441/10000 train_time:35219ms step_avg:79.86ms +[2025-07-05 08:25:34] [Rank 0] step:461/10000 train_time:37339ms step_avg:80.99ms +[2025-07-05 08:25:34] [Rank 0] step:461/10000 train_time:37339ms step_avg:80.99ms +[2025-07-05 08:25:35] [Rank 0] step:481/10000 train_time:38800ms step_avg:80.66ms +[2025-07-05 08:25:35] [Rank 0] step:481/10000 train_time:38800ms step_avg:80.66ms +[2025-07-05 08:25:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:25:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:25:38] [Rank 0] PRINT: step:500/10000 train_loss:8.7308 val_loss:7.1010 train_time:40262ms step_avg:80.52ms +[2025-07-05 08:25:38] [Rank 0] PRINT: step:500/10000 train_loss:8.7308 val_loss:7.1010 train_time:40262ms step_avg:80.52ms +[2025-07-05 08:25:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:25:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ac1c377ac9bb7d95bf69c9688988162506d69f46 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "be166aa8-fc5f-4f90-b4bc-e73bd5a8322c", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/training_log_be166aa8-fc5f-4f90-b4bc-e73bd5a8322c.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/training_log_be166aa8-fc5f-4f90-b4bc-e73bd5a8322c.txt new file mode 100644 index 0000000000000000000000000000000000000000..6a23470bb4f89e379f6b780371588d19f9bee0b7 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/training_log_be166aa8-fc5f-4f90-b4bc-e73bd5a8322c.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:51:05] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:51:05 2025 --- +[2025-07-05 08:51:05] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:51:05 2025 --- +[2025-07-05 08:51:05] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:51:05] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:51:05] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:51:05] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:51:05] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 
08:51:05] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:51:05] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48 +[2025-07-05 08:51:05] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48 +[2025-07-05 08:51:05] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
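# A rough worked example of the two schedules defined above (not part of the
# original script): with num_iterations = 10000 and cooldown_frac = 0.8,
# get_lr returns 1.0 while x = step / 10000 < 0.2 and then decays linearly
# toward 0.1, e.g.
#   step  1000: x = 0.10            -> multiplier 1.0
#   step  5000: x = 0.50, w = 0.625 -> 0.625 + 0.375 * 0.1 = 0.6625
#   step 10000: x = 1.00, w = 0.0   -> 0.1
# get_window_size_blocks grows the attention window with progress, e.g. at
# x = 0.5: 1728 * 0.5 = 864 -> next multiple of 128 is 896 -> 7 blocks of 128.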
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:51:05] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
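# Worked numbers for this configuration (an illustrative sketch inferred from the
# hyperparameters above and from the warning printed later in this log; world_size
# itself is not logged directly and is therefore an inference, not a logged value):
#   train batch per step = world_size * train_seq_len = world_size * 12288 tokens
#   val batch per step   = world_size * val_seq_len   = world_size * 65536 tokens
# The log later reports val_batch_size = 262144, which implies world_size = 4, and
#   val_num_steps = val_tokens // val_batch_size = 1966080 // 262144 = 7
# leaving a remainder of 131072 tokens, hence the "not perfectly divisible" warning.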
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
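# Shape of the learning-rate schedule defined above (illustrative values only,
# computed from num_iterations=10000 and cooldown_frac=0.8: the multiplier is held
# at 1.0 for the first 20% of training, then decays linearly toward 0.1):
#   get_lr(0)     -> 1.0
#   get_lr(2000)  -> 1.0   (boundary at x = 1 - cooldown_frac)
#   get_lr(6000)  -> 0.55  (halfway through the cooldown)
#   get_lr(10000) -> 0.1
# The sliding-window schedule grows with the same progress variable: at step 0 the
# window is 128 tokens (1 block of 128), and by the final step it reaches
# next_multiple_of_n(1728, n=128) = 1792 tokens (14 blocks).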
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:51:06] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:51:06] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:51:06] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:51:06] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:51:08] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:51:08] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:51:08] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:51:08] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:51:08] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:51:08] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:51:08] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:51:08] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:51:08] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:51:08] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:51:08] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:51:08] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:51:08] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:51:08] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:51:09] [Rank 0] PRINT: Model returns: +[2025-07-05 08:51:09] [Rank 0] PRINT: Model returns: +[2025-07-05 08:51:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:51:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:51:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:51:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:51:09] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:51:09] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:51:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:51:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:51:09] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:51:09] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:51:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:51:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:51:09] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:51:09] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:51:09] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:51:09] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:52:14] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:52:14] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:52:14] [Rank 0] PRINT: Starting training... +[2025-07-05 08:52:14] [Rank 0] PRINT: Starting training... +[2025-07-05 08:52:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:52:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:52:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:52:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:52:23] [Rank 0] step:21/10000 train_time:1536ms step_avg:73.16ms +[2025-07-05 08:52:23] [Rank 0] step:21/10000 train_time:1536ms step_avg:73.16ms +[2025-07-05 08:52:24] [Rank 0] step:41/10000 train_time:2986ms step_avg:72.82ms +[2025-07-05 08:52:24] [Rank 0] step:41/10000 train_time:2986ms step_avg:72.82ms +[2025-07-05 08:52:26] [Rank 0] step:61/10000 train_time:4433ms step_avg:72.68ms +[2025-07-05 08:52:26] [Rank 0] step:61/10000 train_time:4433ms step_avg:72.68ms +[2025-07-05 08:52:27] [Rank 0] step:81/10000 train_time:5884ms step_avg:72.64ms +[2025-07-05 08:52:27] [Rank 0] step:81/10000 train_time:5884ms step_avg:72.64ms +[2025-07-05 08:52:29] [Rank 0] step:101/10000 train_time:7983ms step_avg:79.04ms +[2025-07-05 08:52:29] [Rank 0] step:101/10000 train_time:7983ms step_avg:79.04ms +[2025-07-05 08:52:31] [Rank 0] step:121/10000 train_time:9434ms step_avg:77.97ms +[2025-07-05 08:52:31] [Rank 0] step:121/10000 train_time:9434ms step_avg:77.97ms +[2025-07-05 08:52:32] [Rank 0] step:141/10000 train_time:10883ms step_avg:77.18ms +[2025-07-05 08:52:32] [Rank 0] step:141/10000 train_time:10883ms step_avg:77.18ms +[2025-07-05 08:52:34] [Rank 0] step:161/10000 train_time:12333ms step_avg:76.60ms +[2025-07-05 08:52:34] [Rank 0] step:161/10000 train_time:12333ms step_avg:76.60ms +[2025-07-05 08:52:36] [Rank 0] step:181/10000 train_time:14446ms step_avg:79.81ms +[2025-07-05 08:52:36] [Rank 0] step:181/10000 train_time:14446ms step_avg:79.81ms +[2025-07-05 08:52:37] [Rank 0] step:201/10000 train_time:15880ms step_avg:79.00ms +[2025-07-05 08:52:37] [Rank 0] step:201/10000 train_time:15880ms step_avg:79.00ms +[2025-07-05 08:52:39] [Rank 0] step:221/10000 train_time:17330ms step_avg:78.42ms +[2025-07-05 08:52:39] [Rank 0] step:221/10000 train_time:17330ms step_avg:78.42ms +[2025-07-05 08:52:40] [Rank 0] step:241/10000 train_time:18783ms step_avg:77.94ms +[2025-07-05 08:52:40] [Rank 0] step:241/10000 train_time:18783ms step_avg:77.94ms +[2025-07-05 08:52:42] [Rank 0] step:261/10000 train_time:20237ms step_avg:77.54ms +[2025-07-05 08:52:42] [Rank 0] step:261/10000 train_time:20237ms step_avg:77.54ms +[2025-07-05 08:52:44] [Rank 0] step:281/10000 train_time:22352ms step_avg:79.54ms +[2025-07-05 08:52:44] [Rank 0] step:281/10000 train_time:22352ms step_avg:79.54ms +[2025-07-05 08:52:45] [Rank 0] step:301/10000 train_time:23806ms step_avg:79.09ms +[2025-07-05 08:52:45] [Rank 0] step:301/10000 train_time:23806ms step_avg:79.09ms +[2025-07-05 08:52:47] [Rank 0] step:321/10000 train_time:25264ms step_avg:78.70ms +[2025-07-05 08:52:47] [Rank 0] step:321/10000 train_time:25264ms step_avg:78.70ms +[2025-07-05 08:52:48] [Rank 0] step:341/10000 train_time:27014ms step_avg:79.22ms +[2025-07-05 08:52:48] [Rank 0] step:341/10000 train_time:27014ms step_avg:79.22ms +[2025-07-05 08:52:51] [Rank 0] step:361/10000 train_time:29134ms step_avg:80.70ms +[2025-07-05 08:52:51] [Rank 0] step:361/10000 train_time:29134ms step_avg:80.70ms +[2025-07-05 08:52:52] [Rank 0] step:381/10000 train_time:30574ms step_avg:80.25ms +[2025-07-05 08:52:52] [Rank 0] step:381/10000 train_time:30574ms step_avg:80.25ms +[2025-07-05 08:52:53] [Rank 0] step:401/10000 train_time:32028ms step_avg:79.87ms +[2025-07-05 08:52:53] [Rank 0] step:401/10000 train_time:32028ms step_avg:79.87ms +[2025-07-05 08:52:55] [Rank 0] step:421/10000 train_time:33486ms step_avg:79.54ms 
+[2025-07-05 08:52:55] [Rank 0] step:421/10000 train_time:33486ms step_avg:79.54ms +[2025-07-05 08:52:56] [Rank 0] step:441/10000 train_time:34946ms step_avg:79.24ms +[2025-07-05 08:52:56] [Rank 0] step:441/10000 train_time:34946ms step_avg:79.24ms +[2025-07-05 08:52:58] [Rank 0] step:461/10000 train_time:36640ms step_avg:79.48ms +[2025-07-05 08:52:58] [Rank 0] step:461/10000 train_time:36640ms step_avg:79.48ms +[2025-07-05 08:53:00] [Rank 0] step:481/10000 train_time:38098ms step_avg:79.21ms +[2025-07-05 08:53:00] [Rank 0] step:481/10000 train_time:38098ms step_avg:79.21ms +[2025-07-05 08:53:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:53:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:53:02] [Rank 0] PRINT: step:500/10000 train_loss:8.7311 val_loss:7.1019 train_time:39556ms step_avg:79.11ms +[2025-07-05 08:53:02] [Rank 0] PRINT: step:500/10000 train_loss:8.7311 val_loss:7.1019 train_time:39556ms step_avg:79.11ms +[2025-07-05 08:53:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:53:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..038fc6739c2a278353d4c37685818cd3038099a4 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "1e831999-e599-40ae-9cb1-53bb905b3739", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49/training_log_1e831999-e599-40ae-9cb1-53bb905b3739.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49/training_log_1e831999-e599-40ae-9cb1-53bb905b3739.txt new file mode 100644 index 0000000000000000000000000000000000000000..d2303108485d66c4458037954621b5cc61d2f802 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49/training_log_1e831999-e599-40ae-9cb1-53bb905b3739.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:18:38] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:18:38 2025 --- +[2025-07-05 09:18:38] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:18:38 2025 --- +[2025-07-05 09:18:38] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:18:38] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:18:38] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:18:38] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:18:38] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 
09:18:38] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:18:38] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49 +[2025-07-05 09:18:38] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_49 +[2025-07-05 09:18:38] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write each message to the log file exactly once (a second, duplicate write here would log every line twice) + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:18:38] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:18:38] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:18:38] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:18:40] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:18:40] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:18:40] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:18:41] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 09:18:41] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:18:41] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:18:41] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:18:41] [Rank 0] PRINT: Model returns:
+[2025-07-05 09:18:41] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:18:41] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:18:41] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:18:41] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:18:41] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:18:41] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:18:41] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:18:41] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:19:46] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:19:46] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:19:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
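The divisibility warning above follows directly from the validation setup in the logged script: val_batch_size = world_size * val_seq_len and val_num_steps = val_tokens // val_batch_size, so any remainder is silently skipped. A minimal sketch of that arithmetic; the world size of 4 is an inference from 262144 / 65536 and is not logged explicitly:

# Arithmetic behind the "not perfectly divisible" warning in this log.
val_tokens = 1966080      # from config.json
val_seq_len = 65536       # from config.json (4 * 16 * 1024)
world_size = 4            # assumption: inferred from 262144 // 65536

val_batch_size = world_size * val_seq_len        # 262144, matches the warning
val_num_steps = val_tokens // val_batch_size     # 7 full validation steps per eval
skipped_tokens = val_tokens - val_num_steps * val_batch_size
print(val_batch_size, val_num_steps, skipped_tokens)  # 262144 7 131072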
+[2025-07-05 09:19:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:19:56] [Rank 0] step:21/10000 train_time:1874ms step_avg:89.22ms
+[2025-07-05 09:19:58] [Rank 0] step:41/10000 train_time:3328ms step_avg:81.17ms
+[2025-07-05 09:19:59] [Rank 0] step:61/10000 train_time:4779ms step_avg:78.35ms
+[2025-07-05 09:20:01] [Rank 0] step:81/10000 train_time:6233ms step_avg:76.95ms
+[2025-07-05 09:20:03] [Rank 0] step:101/10000 train_time:8343ms step_avg:82.61ms
+[2025-07-05 09:20:04] [Rank 0] step:121/10000 train_time:9797ms step_avg:80.97ms
+[2025-07-05 09:20:06] [Rank 0] step:141/10000 train_time:11252ms step_avg:79.80ms
+[2025-07-05 09:20:07] [Rank 0] step:161/10000 train_time:12707ms step_avg:78.93ms
+[2025-07-05 09:20:09] [Rank 0] step:181/10000 train_time:14416ms step_avg:79.65ms
+[2025-07-05 09:20:11] [Rank 0] step:201/10000 train_time:16272ms step_avg:80.96ms
+[2025-07-05 09:20:12] [Rank 0] step:221/10000 train_time:17937ms step_avg:81.16ms
+[2025-07-05 09:20:14] [Rank 0] step:241/10000 train_time:19394ms step_avg:80.47ms
+[2025-07-05 09:20:15] [Rank 0] step:261/10000 train_time:20851ms step_avg:79.89ms
+[2025-07-05 09:20:17] [Rank 0] step:281/10000 train_time:22963ms step_avg:81.72ms
+[2025-07-05 09:20:19] [Rank 0] step:301/10000 train_time:24422ms step_avg:81.14ms
+[2025-07-05 09:20:20] [Rank 0] step:321/10000 train_time:25881ms step_avg:80.63ms
+[2025-07-05 09:20:22] [Rank 0] step:341/10000 train_time:27341ms step_avg:80.18ms
+[2025-07-05 09:20:24] [Rank 0] step:361/10000 train_time:29056ms step_avg:80.49ms
+[2025-07-05 09:20:25] [Rank 0] step:381/10000 train_time:30931ms step_avg:81.18ms
+[2025-07-05 09:20:27] [Rank 0] step:401/10000 train_time:32393ms step_avg:80.78ms
+[2025-07-05 09:20:28] [Rank 0] step:421/10000 train_time:33854ms step_avg:80.41ms
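The step_avg values in these lines are not logged separately; per the training-loop print in the script above, they are the cumulative train_time divided by the printed step number (step + 1 in the code). A short check against a few of the entries above, which agrees up to the rounding of train_time to whole milliseconds:

# Reproduce step_avg from the cumulative train_time in this log.
samples = [(21, 1874), (201, 16272), (421, 33854)]  # (printed step, train_time in ms) taken from this log
for printed_step, train_time_ms in samples:
    print(f"step:{printed_step} step_avg:{train_time_ms / printed_step:.2f}ms")
# -> 89.24ms, 80.96ms, 80.41ms (log shows 89.22, 80.96, 80.41)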
+[2025-07-05 09:20:28] [Rank 0] step:421/10000 train_time:33854ms step_avg:80.41ms +[2025-07-05 09:20:30] [Rank 0] step:441/10000 train_time:35317ms step_avg:80.08ms +[2025-07-05 09:20:30] [Rank 0] step:441/10000 train_time:35317ms step_avg:80.08ms +[2025-07-05 09:20:32] [Rank 0] step:461/10000 train_time:37429ms step_avg:81.19ms +[2025-07-05 09:20:32] [Rank 0] step:461/10000 train_time:37429ms step_avg:81.19ms +[2025-07-05 09:20:33] [Rank 0] step:481/10000 train_time:38892ms step_avg:80.86ms +[2025-07-05 09:20:33] [Rank 0] step:481/10000 train_time:38892ms step_avg:80.86ms +[2025-07-05 09:20:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:20:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:20:36] [Rank 0] PRINT: step:500/10000 train_loss:8.7314 val_loss:7.1029 train_time:40353ms step_avg:80.71ms +[2025-07-05 09:20:36] [Rank 0] PRINT: step:500/10000 train_loss:8.7314 val_loss:7.1029 train_time:40353ms step_avg:80.71ms +[2025-07-05 09:20:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:20:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..285cf1bb20f14b021a3a8228c1bb8cd0a027a40d --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "8b7bd88f-c6f3-4cac-bef9-c6e33b1631fb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50/training_log_8b7bd88f-c6f3-4cac-bef9-c6e33b1631fb.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50/training_log_8b7bd88f-c6f3-4cac-bef9-c6e33b1631fb.txt new file mode 100644 index 0000000000000000000000000000000000000000..d95f21b71d437bf1b25c7e05458b056834b6beb1 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50/training_log_8b7bd88f-c6f3-4cac-bef9-c6e33b1631fb.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:46:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:46:03 2025 --- +[2025-07-05 09:46:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:46:03 2025 --- +[2025-07-05 09:46:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:46:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:46:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:46:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:46:03] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 
09:46:03] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:46:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50 +[2025-07-05 09:46:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_50 +[2025-07-05 09:46:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:46:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
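+# Worked illustration of the schedules applied in the training loop below (derived from
+# num_iterations=10000 and cooldown_frac=0.8; these are example values only, not extra logic):
+#   get_lr(step)  == 1.0  for step/10000 < 0.2, i.e. steps 0..1999 (constant phase)
+#   get_lr(6000)  == 0.55     # x = 0.6, w = (1 - 0.6)/0.8 = 0.5, so 0.5*1.0 + 0.5*0.1
+#   get_lr(10000) == 0.1      # x = 1.0, w = 0.0, the floor of the linear cooldown
+# Each optimizer group then steps with group["initial_lr"] * get_lr(step); Muon's momentum is
+# additionally ramped from 0.85 to 0.95 over the first 300 steps via frac = min(step / 300, 1).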
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:46:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:46:04] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:46:06] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:46:06] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:46:06] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:46:06] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 09:46:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:46:07] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:46:07] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:46:07] [Rank 0] PRINT: Model returns:
+[2025-07-05 09:46:07] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:46:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:46:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:46:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:46:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:46:07] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:46:07] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:46:07] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:47:11] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:47:11] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:47:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
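+# The warning above is just arithmetic on the configured sizes (worked check):
+#   val_seq_len    = 4*16*1024 = 65536
+#   val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144
+#   val_tokens     = 1966080 = 7.5 * 262144
+# so each validation pass runs 1966080 // 262144 = 7 full steps and the trailing 131072 tokens are skipped.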
+[2025-07-05 09:47:18] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:47:20] [Rank 0] step:21/10000 train_time:1750ms step_avg:83.33ms
+[2025-07-05 09:47:21] [Rank 0] step:41/10000 train_time:3200ms step_avg:78.05ms
+[2025-07-05 09:47:23] [Rank 0] step:61/10000 train_time:4651ms step_avg:76.24ms
+[2025-07-05 09:47:24] [Rank 0] step:81/10000 train_time:6103ms step_avg:75.34ms
+[2025-07-05 09:47:26] [Rank 0] step:101/10000 train_time:8212ms step_avg:81.31ms
+[2025-07-05 09:47:28] [Rank 0] step:121/10000 train_time:9664ms step_avg:79.86ms
+[2025-07-05 09:47:29] [Rank 0] step:141/10000 train_time:11114ms step_avg:78.82ms
+[2025-07-05 09:47:31] [Rank 0] step:161/10000 train_time:12568ms step_avg:78.06ms
+[2025-07-05 09:47:33] [Rank 0] step:181/10000 train_time:14023ms step_avg:77.47ms
+[2025-07-05 09:47:34] [Rank 0] step:201/10000 train_time:16133ms step_avg:80.27ms
+[2025-07-05 09:47:36] [Rank 0] step:221/10000 train_time:17719ms step_avg:80.17ms
+[2025-07-05 09:47:37] [Rank 0] step:241/10000 train_time:19244ms step_avg:79.85ms
+[2025-07-05 09:47:39] [Rank 0] step:261/10000 train_time:20701ms step_avg:79.31ms
+[2025-07-05 09:47:41] [Rank 0] step:281/10000 train_time:22829ms step_avg:81.24ms
+[2025-07-05 09:47:43] [Rank 0] step:301/10000 train_time:24288ms step_avg:80.69ms
+[2025-07-05 09:47:44] [Rank 0] step:321/10000 train_time:25745ms step_avg:80.20ms
+[2025-07-05 09:47:45] [Rank 0] step:341/10000 train_time:27202ms step_avg:79.77ms
+[2025-07-05 09:47:47] [Rank 0] step:361/10000 train_time:28816ms step_avg:79.82ms
+[2025-07-05 09:47:48] [Rank 0] step:381/10000 train_time:30256ms step_avg:79.41ms
+[2025-07-05 09:47:50] [Rank 0] step:401/10000 train_time:31717ms step_avg:79.09ms
+[2025-07-05 09:47:51] [Rank 0] step:421/10000 train_time:33175ms step_avg:78.80ms +[2025-07-05 09:47:53] [Rank 0] step:441/10000 train_time:34632ms step_avg:78.53ms +[2025-07-05 09:47:53] [Rank 0] step:441/10000 train_time:34632ms step_avg:78.53ms +[2025-07-05 09:47:55] [Rank 0] step:461/10000 train_time:36754ms step_avg:79.73ms +[2025-07-05 09:47:55] [Rank 0] step:461/10000 train_time:36754ms step_avg:79.73ms +[2025-07-05 09:47:56] [Rank 0] step:481/10000 train_time:38212ms step_avg:79.44ms +[2025-07-05 09:47:56] [Rank 0] step:481/10000 train_time:38212ms step_avg:79.44ms +[2025-07-05 09:47:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:47:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:47:59] [Rank 0] PRINT: step:500/10000 train_loss:8.7309 val_loss:7.1017 train_time:39672ms step_avg:79.34ms +[2025-07-05 09:47:59] [Rank 0] PRINT: step:500/10000 train_loss:8.7309 val_loss:7.1017 train_time:39672ms step_avg:79.34ms +[2025-07-05 09:47:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:47:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a562e3cf1c6040a6f185db8382faa0e1f1813165 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "aa7bc697-bed3-48cd-b693-0210b31f10a1", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51/training_log_aa7bc697-bed3-48cd-b693-0210b31f10a1.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51/training_log_aa7bc697-bed3-48cd-b693-0210b31f10a1.txt new file mode 100644 index 0000000000000000000000000000000000000000..a827f5eceb440ff6f0b15b4ec7537a1d656eac33 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51/training_log_aa7bc697-bed3-48cd-b693-0210b31f10a1.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:13:24] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:13:24 2025 --- +[2025-07-05 10:13:24] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:13:24 2025 --- +[2025-07-05 10:13:24] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:13:24] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:13:24] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:13:24] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:13:24] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 
10:13:24] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:13:24] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51 +[2025-07-05 10:13:24] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_51 +[2025-07-05 10:13:24] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:13:24] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
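+# Worked example of the get_lr schedule defined above (illustrative only, using the
+# values from Hyperparameters: num_iterations = 10000, cooldown_frac = 0.8). The
+# returned multiplier scales each param group's initial_lr during training:
+#   step  1000: x = 0.10 < 0.20 (stable phase)         -> multiplier 1.00
+#   step  6000: x = 0.60, w = (1 - 0.60) / 0.8 = 0.50  -> 0.5*1.0 + 0.5*0.1 = 0.55
+#   step 10000: x = 1.00, w = 0.00                     -> multiplier 0.10
+# i.e. the LR stays flat for the first 20% of steps, then decays linearly to 10%.
+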
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:13:25] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:13:25] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:13:25] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:13:25] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:13:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:13:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:13:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:13:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:13:27] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:13:27] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:13:28] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:13:28] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:13:28] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:13:28] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:13:28] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:13:28] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:13:28] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:13:28] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:13:28] [Rank 0] PRINT: Model returns: +[2025-07-05 10:13:28] [Rank 0] PRINT: Model returns: +[2025-07-05 10:13:28] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:13:28] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:13:28] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:13:28] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:13:28] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:13:28] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:13:28] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:13:28] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:13:28] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:13:28] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:13:28] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:13:28] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:13:28] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:13:28] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:13:28] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:13:28] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:14:34] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:14:34] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:14:34] [Rank 0] PRINT: Starting training... +[2025-07-05 10:14:34] [Rank 0] PRINT: Starting training... +[2025-07-05 10:14:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:14:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:14:41] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:14:41] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:14:43] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.56ms +[2025-07-05 10:14:43] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.56ms +[2025-07-05 10:14:44] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.20ms +[2025-07-05 10:14:44] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.20ms +[2025-07-05 10:14:46] [Rank 0] step:61/10000 train_time:4660ms step_avg:76.40ms +[2025-07-05 10:14:46] [Rank 0] step:61/10000 train_time:4660ms step_avg:76.40ms +[2025-07-05 10:14:47] [Rank 0] step:81/10000 train_time:6116ms step_avg:75.51ms +[2025-07-05 10:14:47] [Rank 0] step:81/10000 train_time:6116ms step_avg:75.51ms +[2025-07-05 10:14:49] [Rank 0] step:101/10000 train_time:8233ms step_avg:81.51ms +[2025-07-05 10:14:49] [Rank 0] step:101/10000 train_time:8233ms step_avg:81.51ms +[2025-07-05 10:14:51] [Rank 0] step:121/10000 train_time:9685ms step_avg:80.04ms +[2025-07-05 10:14:51] [Rank 0] step:121/10000 train_time:9685ms step_avg:80.04ms +[2025-07-05 10:14:52] [Rank 0] step:141/10000 train_time:11144ms step_avg:79.03ms +[2025-07-05 10:14:52] [Rank 0] step:141/10000 train_time:11144ms step_avg:79.03ms +[2025-07-05 10:14:54] [Rank 0] step:161/10000 train_time:12600ms step_avg:78.26ms +[2025-07-05 10:14:54] [Rank 0] step:161/10000 train_time:12600ms step_avg:78.26ms +[2025-07-05 10:14:55] [Rank 0] step:181/10000 train_time:14310ms step_avg:79.06ms +[2025-07-05 10:14:55] [Rank 0] step:181/10000 train_time:14310ms step_avg:79.06ms +[2025-07-05 10:14:57] [Rank 0] step:201/10000 train_time:15748ms step_avg:78.35ms +[2025-07-05 10:14:57] [Rank 0] step:201/10000 train_time:15748ms step_avg:78.35ms +[2025-07-05 10:14:58] [Rank 0] step:221/10000 train_time:17204ms step_avg:77.85ms +[2025-07-05 10:14:58] [Rank 0] step:221/10000 train_time:17204ms step_avg:77.85ms +[2025-07-05 10:15:00] [Rank 0] step:241/10000 train_time:18663ms step_avg:77.44ms +[2025-07-05 10:15:00] [Rank 0] step:241/10000 train_time:18663ms step_avg:77.44ms +[2025-07-05 10:15:02] [Rank 0] step:261/10000 train_time:20409ms step_avg:78.20ms +[2025-07-05 10:15:02] [Rank 0] step:261/10000 train_time:20409ms step_avg:78.20ms +[2025-07-05 10:15:04] [Rank 0] step:281/10000 train_time:22533ms step_avg:80.19ms +[2025-07-05 10:15:04] [Rank 0] step:281/10000 train_time:22533ms step_avg:80.19ms +[2025-07-05 10:15:05] [Rank 0] step:301/10000 train_time:23991ms step_avg:79.70ms +[2025-07-05 10:15:05] [Rank 0] step:301/10000 train_time:23991ms step_avg:79.70ms +[2025-07-05 10:15:07] [Rank 0] step:321/10000 train_time:25451ms step_avg:79.29ms +[2025-07-05 10:15:07] [Rank 0] step:321/10000 train_time:25451ms step_avg:79.29ms +[2025-07-05 10:15:08] [Rank 0] step:341/10000 train_time:26910ms step_avg:78.92ms +[2025-07-05 10:15:08] [Rank 0] step:341/10000 train_time:26910ms step_avg:78.92ms +[2025-07-05 10:15:10] [Rank 0] step:361/10000 train_time:29063ms step_avg:80.51ms +[2025-07-05 10:15:10] [Rank 0] step:361/10000 train_time:29063ms step_avg:80.51ms +[2025-07-05 10:15:12] [Rank 0] step:381/10000 train_time:30513ms step_avg:80.09ms +[2025-07-05 10:15:12] [Rank 0] step:381/10000 train_time:30513ms step_avg:80.09ms +[2025-07-05 10:15:13] [Rank 0] step:401/10000 train_time:31986ms step_avg:79.77ms +[2025-07-05 10:15:13] [Rank 0] step:401/10000 train_time:31986ms step_avg:79.77ms +[2025-07-05 10:15:15] [Rank 0] step:421/10000 train_time:33450ms step_avg:79.45ms 
+[2025-07-05 10:15:15] [Rank 0] step:421/10000 train_time:33450ms step_avg:79.45ms +[2025-07-05 10:15:16] [Rank 0] step:441/10000 train_time:34916ms step_avg:79.17ms +[2025-07-05 10:15:16] [Rank 0] step:441/10000 train_time:34916ms step_avg:79.17ms +[2025-07-05 10:15:18] [Rank 0] step:461/10000 train_time:37030ms step_avg:80.33ms +[2025-07-05 10:15:18] [Rank 0] step:461/10000 train_time:37030ms step_avg:80.33ms +[2025-07-05 10:15:20] [Rank 0] step:481/10000 train_time:38492ms step_avg:80.03ms +[2025-07-05 10:15:20] [Rank 0] step:481/10000 train_time:38492ms step_avg:80.03ms +[2025-07-05 10:15:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:15:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:15:22] [Rank 0] PRINT: step:500/10000 train_loss:8.7309 val_loss:7.1017 train_time:39953ms step_avg:79.91ms +[2025-07-05 10:15:22] [Rank 0] PRINT: step:500/10000 train_loss:8.7309 val_loss:7.1017 train_time:39953ms step_avg:79.91ms +[2025-07-05 10:15:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:15:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f0ef41b84cc11444244c7d7e69db5059e59c5744 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "f94b4d2a-73f5-4cd7-83bd-840a50b3bac1", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/training_log_f94b4d2a-73f5-4cd7-83bd-840a50b3bac1.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/training_log_f94b4d2a-73f5-4cd7-83bd-840a50b3bac1.txt new file mode 100644 index 0000000000000000000000000000000000000000..c340aac90841abaee1352ee2a25185314f576732 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/training_log_f94b4d2a-73f5-4cd7-83bd-840a50b3bac1.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:12:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:12:21 2025 --- +[2025-07-05 08:12:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:12:21 2025 --- +[2025-07-05 08:12:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:12:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:12:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:12:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:12:22] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 
08:12:22] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:12:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42 +[2025-07-05 08:12:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42 +[2025-07-05 08:12:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:12:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:12:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:12:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:12:22] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:12:22] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:12:24] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:12:24] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:12:24] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:12:24] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:12:24] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:12:24] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:12:25] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:12:25] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:12:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:12:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:12:25] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:12:25] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:12:25] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:12:25] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:12:25] [Rank 0] PRINT: Model returns: +[2025-07-05 08:12:25] [Rank 0] PRINT: Model returns: +[2025-07-05 08:12:25] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:12:25] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:12:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:12:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:12:25] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:12:25] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:12:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:12:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:12:25] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:12:25] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:12:25] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:12:25] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:12:25] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:12:25] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:12:25] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:12:25] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:13:58] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:13:58] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:13:58] [Rank 0] PRINT: Starting training... +[2025-07-05 08:13:58] [Rank 0] PRINT: Starting training... +[2025-07-05 08:13:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:13:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:14:06] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:14:06] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:14:07] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.60ms +[2025-07-05 08:14:07] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.60ms +[2025-07-05 08:14:09] [Rank 0] step:41/10000 train_time:3099ms step_avg:75.59ms +[2025-07-05 08:14:09] [Rank 0] step:41/10000 train_time:3099ms step_avg:75.59ms +[2025-07-05 08:14:10] [Rank 0] step:61/10000 train_time:4552ms step_avg:74.62ms +[2025-07-05 08:14:10] [Rank 0] step:61/10000 train_time:4552ms step_avg:74.62ms +[2025-07-05 08:14:12] [Rank 0] step:81/10000 train_time:6009ms step_avg:74.18ms +[2025-07-05 08:14:12] [Rank 0] step:81/10000 train_time:6009ms step_avg:74.18ms +[2025-07-05 08:14:13] [Rank 0] step:101/10000 train_time:7704ms step_avg:76.28ms +[2025-07-05 08:14:13] [Rank 0] step:101/10000 train_time:7704ms step_avg:76.28ms +[2025-07-05 08:14:15] [Rank 0] step:121/10000 train_time:9162ms step_avg:75.72ms +[2025-07-05 08:14:15] [Rank 0] step:121/10000 train_time:9162ms step_avg:75.72ms +[2025-07-05 08:14:16] [Rank 0] step:141/10000 train_time:10616ms step_avg:75.29ms +[2025-07-05 08:14:16] [Rank 0] step:141/10000 train_time:10616ms step_avg:75.29ms +[2025-07-05 08:14:18] [Rank 0] step:161/10000 train_time:12077ms step_avg:75.01ms +[2025-07-05 08:14:18] [Rank 0] step:161/10000 train_time:12077ms step_avg:75.01ms +[2025-07-05 08:14:20] [Rank 0] step:181/10000 train_time:13536ms step_avg:74.78ms +[2025-07-05 08:14:20] [Rank 0] step:181/10000 train_time:13536ms step_avg:74.78ms +[2025-07-05 08:14:21] [Rank 0] step:201/10000 train_time:15647ms step_avg:77.85ms +[2025-07-05 08:14:21] [Rank 0] step:201/10000 train_time:15647ms step_avg:77.85ms +[2025-07-05 08:14:23] [Rank 0] step:221/10000 train_time:17106ms step_avg:77.40ms +[2025-07-05 08:14:23] [Rank 0] step:221/10000 train_time:17106ms step_avg:77.40ms +[2025-07-05 08:14:24] [Rank 0] step:241/10000 train_time:18565ms step_avg:77.04ms +[2025-07-05 08:14:24] [Rank 0] step:241/10000 train_time:18565ms step_avg:77.04ms +[2025-07-05 08:14:26] [Rank 0] step:261/10000 train_time:20029ms step_avg:76.74ms +[2025-07-05 08:14:26] [Rank 0] step:261/10000 train_time:20029ms step_avg:76.74ms +[2025-07-05 08:14:27] [Rank 0] step:281/10000 train_time:21730ms step_avg:77.33ms +[2025-07-05 08:14:27] [Rank 0] step:281/10000 train_time:21730ms step_avg:77.33ms +[2025-07-05 08:14:29] [Rank 0] step:301/10000 train_time:23200ms step_avg:77.07ms +[2025-07-05 08:14:29] [Rank 0] step:301/10000 train_time:23200ms step_avg:77.07ms +[2025-07-05 08:14:30] [Rank 0] step:321/10000 train_time:24665ms step_avg:76.84ms +[2025-07-05 08:14:30] [Rank 0] step:321/10000 train_time:24665ms step_avg:76.84ms +[2025-07-05 08:14:32] [Rank 0] step:341/10000 train_time:26133ms step_avg:76.64ms +[2025-07-05 08:14:32] [Rank 0] step:341/10000 train_time:26133ms step_avg:76.64ms +[2025-07-05 08:14:34] [Rank 0] step:361/10000 train_time:27653ms step_avg:76.60ms +[2025-07-05 08:14:34] [Rank 0] step:361/10000 train_time:27653ms step_avg:76.60ms +[2025-07-05 08:14:35] [Rank 0] step:381/10000 train_time:29737ms step_avg:78.05ms +[2025-07-05 08:14:35] [Rank 0] step:381/10000 train_time:29737ms step_avg:78.05ms +[2025-07-05 08:14:37] [Rank 0] step:401/10000 train_time:31207ms step_avg:77.82ms +[2025-07-05 08:14:37] [Rank 0] step:401/10000 train_time:31207ms step_avg:77.82ms +[2025-07-05 08:14:38] [Rank 0] step:421/10000 train_time:32676ms step_avg:77.62ms 
+[2025-07-05 08:14:38] [Rank 0] step:421/10000 train_time:32676ms step_avg:77.62ms +[2025-07-05 08:14:40] [Rank 0] step:441/10000 train_time:34147ms step_avg:77.43ms +[2025-07-05 08:14:40] [Rank 0] step:441/10000 train_time:34147ms step_avg:77.43ms +[2025-07-05 08:14:41] [Rank 0] step:461/10000 train_time:35850ms step_avg:77.77ms +[2025-07-05 08:14:41] [Rank 0] step:461/10000 train_time:35850ms step_avg:77.77ms +[2025-07-05 08:14:43] [Rank 0] step:481/10000 train_time:37321ms step_avg:77.59ms +[2025-07-05 08:14:43] [Rank 0] step:481/10000 train_time:37321ms step_avg:77.59ms +[2025-07-05 08:14:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:14:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:14:45] [Rank 0] PRINT: step:500/10000 train_loss:6.8636 val_loss:4.3872 train_time:38789ms step_avg:77.58ms +[2025-07-05 08:14:45] [Rank 0] PRINT: step:500/10000 train_loss:6.8636 val_loss:4.3872 train_time:38789ms step_avg:77.58ms +[2025-07-05 08:14:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:14:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..28cc891d433f64a4b1c8913cc65477ccf018b9f1 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "857202da-15a0-4538-b025-7e9b43fca2d4", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/training_log_857202da-15a0-4538-b025-7e9b43fca2d4.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/training_log_857202da-15a0-4538-b025-7e9b43fca2d4.txt new file mode 100644 index 0000000000000000000000000000000000000000..336e8d6b27037120aa031cf8abbe17c8f68688ac --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/training_log_857202da-15a0-4538-b025-7e9b43fca2d4.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:40:09] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:40:09 2025 --- +[2025-07-05 08:40:09] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:40:09 2025 --- +[2025-07-05 08:40:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:40:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:40:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:40:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:40:09] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 
08:40:09] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:40:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43 +[2025-07-05 08:40:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43 +[2025-07-05 08:40:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
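+# [Editor's note] A minimal, self-contained sketch (hypothetical helper name) of the
+# "stable then decay" multiplier implemented by get_lr() above: the factor stays at
+# 1.0 for the first (1 - cooldown_frac) of training, then decays linearly to 0.1.
+def _sketch_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamp training progress to [0, 1]
+    if x < 1 - cooldown_frac:
+        return 1.0
+    w = (1 - x) / max(cooldown_frac, 1e-9)  # 1.0 at the start of cooldown, 0.0 at the end
+    return w * 1.0 + (1 - w) * 0.1
+# With num_iterations=10000 and cooldown_frac=0.8: steps 0-2000 keep the full lr,
+# step 6000 uses 0.55x, and step 10000 uses 0.1x of each group's initial_lr.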
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:40:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
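+# [Editor's note] Hedged sketch (hypothetical helper name) of the Muon momentum warmup
+# applied each step in the training loop logged above: momentum ramps linearly from
+# 0.85 to 0.95 over the first 300 steps, while every optimizer group's lr is rescaled
+# to initial_lr * get_lr(step).
+def _sketch_muon_momentum(step: int, warmup_steps: int = 300) -> float:
+    frac = min(step / warmup_steps, 1.0)
+    return (1 - frac) * 0.85 + frac * 0.95
+# _sketch_muon_momentum(0) == 0.85, _sketch_muon_momentum(150) is about 0.90, and any
+# step >= 300 returns 0.95.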
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
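+# [Editor's note] Illustrative sketch of how the run directory name below is derived
+# from the CLI arguments; this is what yields folders such as
+# "mode_0_param_qkvo_lr_0.0001_seed_42" in this diff. The helper name is hypothetical;
+# the script builds the same string inline.
+def _sketch_run_folder_name(optimizer_mode: int, parameterization: str, adam_lr: float, seed: int) -> str:
+    return f"mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed}"
+# _sketch_run_folder_name(0, "qkvo", 0.0001, 42) -> "mode_0_param_qkvo_lr_0.0001_seed_42"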
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
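+        # [Editor's note] Hypothetical worked example of the split performed just below:
+        # the question up to and including '?' becomes the prompt, and the first GPT-2
+        # token of " <answer>" is what the model's prediction is later scored against.
+        _example_cleaned = "What is the birth date of Alice? 1990"
+        _example_cut = _example_cleaned.find('?') + 1
+        assert _example_cleaned[:_example_cut] == "What is the birth date of Alice?"
+        assert _example_cleaned[_example_cut:].strip() == "1990"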
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
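+# Note: train_loss_sum (above) and train_step_count (below) accumulate training
+# statistics between validation checkpoints. Every training step adds
+# loss_train.detach() / args.train_seq_len and increments the counter; at each
+# validation point the ratio train_loss_sum / train_step_count is all-reduced
+# (averaged) across ranks and logged as train_loss, after which both tensors
+# are reset to zero for the next segment.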
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:40:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:40:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:40:09] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:40:09] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:40:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:40:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:40:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:40:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:40:11] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:40:11] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:40:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:40:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:40:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:40:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:40:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:40:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:40:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:40:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:40:12] [Rank 0] PRINT: Model returns: +[2025-07-05 08:40:12] [Rank 0] PRINT: Model returns: +[2025-07-05 08:40:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:40:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:40:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:40:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:40:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:40:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:40:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:40:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:40:12] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:40:12] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:40:12] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:40:12] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:40:12] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:40:12] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:40:12] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:40:12] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:41:16] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:41:16] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:41:16] [Rank 0] PRINT: Starting training... +[2025-07-05 08:41:16] [Rank 0] PRINT: Starting training... +[2025-07-05 08:41:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:41:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:41:24] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:41:24] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:41:25] [Rank 0] step:21/10000 train_time:1643ms step_avg:78.22ms +[2025-07-05 08:41:25] [Rank 0] step:21/10000 train_time:1643ms step_avg:78.22ms +[2025-07-05 08:41:27] [Rank 0] step:41/10000 train_time:3160ms step_avg:77.08ms +[2025-07-05 08:41:27] [Rank 0] step:41/10000 train_time:3160ms step_avg:77.08ms +[2025-07-05 08:41:28] [Rank 0] step:61/10000 train_time:4615ms step_avg:75.65ms +[2025-07-05 08:41:28] [Rank 0] step:61/10000 train_time:4615ms step_avg:75.65ms +[2025-07-05 08:41:30] [Rank 0] step:81/10000 train_time:6067ms step_avg:74.90ms +[2025-07-05 08:41:30] [Rank 0] step:81/10000 train_time:6067ms step_avg:74.90ms +[2025-07-05 08:41:32] [Rank 0] step:101/10000 train_time:8179ms step_avg:80.98ms +[2025-07-05 08:41:32] [Rank 0] step:101/10000 train_time:8179ms step_avg:80.98ms +[2025-07-05 08:41:33] [Rank 0] step:121/10000 train_time:9633ms step_avg:79.62ms +[2025-07-05 08:41:33] [Rank 0] step:121/10000 train_time:9633ms step_avg:79.62ms +[2025-07-05 08:41:35] [Rank 0] step:141/10000 train_time:11091ms step_avg:78.66ms +[2025-07-05 08:41:35] [Rank 0] step:141/10000 train_time:11091ms step_avg:78.66ms +[2025-07-05 08:41:36] [Rank 0] step:161/10000 train_time:12550ms step_avg:77.95ms +[2025-07-05 08:41:36] [Rank 0] step:161/10000 train_time:12550ms step_avg:77.95ms +[2025-07-05 08:41:38] [Rank 0] step:181/10000 train_time:14261ms step_avg:78.79ms +[2025-07-05 08:41:38] [Rank 0] step:181/10000 train_time:14261ms step_avg:78.79ms +[2025-07-05 08:41:40] [Rank 0] step:201/10000 train_time:15702ms step_avg:78.12ms +[2025-07-05 08:41:40] [Rank 0] step:201/10000 train_time:15702ms step_avg:78.12ms +[2025-07-05 08:41:41] [Rank 0] step:221/10000 train_time:17162ms step_avg:77.66ms +[2025-07-05 08:41:41] [Rank 0] step:221/10000 train_time:17162ms step_avg:77.66ms +[2025-07-05 08:41:42] [Rank 0] step:241/10000 train_time:18627ms step_avg:77.29ms +[2025-07-05 08:41:42] [Rank 0] step:241/10000 train_time:18627ms step_avg:77.29ms +[2025-07-05 08:41:44] [Rank 0] step:261/10000 train_time:20090ms step_avg:76.97ms +[2025-07-05 08:41:44] [Rank 0] step:261/10000 train_time:20090ms step_avg:76.97ms +[2025-07-05 08:41:46] [Rank 0] step:281/10000 train_time:21793ms step_avg:77.56ms +[2025-07-05 08:41:46] [Rank 0] step:281/10000 train_time:21793ms step_avg:77.56ms +[2025-07-05 08:41:47] [Rank 0] step:301/10000 train_time:23258ms step_avg:77.27ms +[2025-07-05 08:41:47] [Rank 0] step:301/10000 train_time:23258ms step_avg:77.27ms +[2025-07-05 08:41:49] [Rank 0] step:321/10000 train_time:24724ms step_avg:77.02ms +[2025-07-05 08:41:49] [Rank 0] step:321/10000 train_time:24724ms step_avg:77.02ms +[2025-07-05 08:41:50] [Rank 0] step:341/10000 train_time:26192ms step_avg:76.81ms +[2025-07-05 08:41:50] [Rank 0] step:341/10000 train_time:26192ms step_avg:76.81ms +[2025-07-05 08:41:52] [Rank 0] step:361/10000 train_time:27709ms step_avg:76.76ms +[2025-07-05 08:41:52] [Rank 0] step:361/10000 train_time:27709ms step_avg:76.76ms +[2025-07-05 08:41:54] [Rank 0] step:381/10000 train_time:29780ms step_avg:78.16ms +[2025-07-05 08:41:54] [Rank 0] step:381/10000 train_time:29780ms step_avg:78.16ms +[2025-07-05 08:41:55] [Rank 0] step:401/10000 train_time:31249ms step_avg:77.93ms +[2025-07-05 08:41:55] [Rank 0] step:401/10000 train_time:31249ms step_avg:77.93ms +[2025-07-05 08:41:57] [Rank 0] step:421/10000 train_time:32733ms step_avg:77.75ms 
+[2025-07-05 08:41:57] [Rank 0] step:421/10000 train_time:32733ms step_avg:77.75ms +[2025-07-05 08:41:58] [Rank 0] step:441/10000 train_time:34202ms step_avg:77.56ms +[2025-07-05 08:41:58] [Rank 0] step:441/10000 train_time:34202ms step_avg:77.56ms +[2025-07-05 08:42:00] [Rank 0] step:461/10000 train_time:36310ms step_avg:78.76ms +[2025-07-05 08:42:00] [Rank 0] step:461/10000 train_time:36310ms step_avg:78.76ms +[2025-07-05 08:42:02] [Rank 0] step:481/10000 train_time:37778ms step_avg:78.54ms +[2025-07-05 08:42:02] [Rank 0] step:481/10000 train_time:37778ms step_avg:78.54ms +[2025-07-05 08:42:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:42:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:42:04] [Rank 0] PRINT: step:500/10000 train_loss:6.8671 val_loss:4.3919 train_time:39249ms step_avg:78.50ms +[2025-07-05 08:42:04] [Rank 0] PRINT: step:500/10000 train_loss:6.8671 val_loss:4.3919 train_time:39249ms step_avg:78.50ms +[2025-07-05 08:42:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:42:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9686a6e1a6cd6b2f37b4d2774d3a892ab4b6e434 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "bf9a28c6-e25a-409b-ba5c-68a6818a129d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44/training_log_bf9a28c6-e25a-409b-ba5c-68a6818a129d.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44/training_log_bf9a28c6-e25a-409b-ba5c-68a6818a129d.txt new file mode 100644 index 0000000000000000000000000000000000000000..7327b46bf93df479382d35a802224f1e4988be66 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44/training_log_bf9a28c6-e25a-409b-ba5c-68a6818a129d.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:07:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:07:20 2025 --- +[2025-07-05 09:07:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:07:20 2025 --- +[2025-07-05 09:07:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:07:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:07:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:07:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:07:20] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 
09:07:20] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:07:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44 +[2025-07-05 09:07:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_44 +[2025-07-05 09:07:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
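+# train_loss_sum and the step counter below accumulate the per-step training loss (normalized by
+# args.train_seq_len) between validation points; both are reset to zero after each validation report.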
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:07:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
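+# One sub-folder per (optimizer_mode, model_parameterization, adam_lr, seed) combination is created
+# under this base directory; config.json and training_log_<uuid>.txt are written into it.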
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
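A few concrete numbers implied by the schedules and data settings above make the step logs easier to read. The sketch below is illustrative only; it assumes the hyperparameters logged for this run (num_iterations=10000, cooldown_frac=0.8, train_seq_len=12288, val_seq_len=65536, val_tokens=1966080) and the world_size of 4 implied by the val_batch_size of 262144 reported later in the log.

def example_lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    # Same shape as get_lr above: flat at 1.0 for the first (1 - cooldown_frac) of training,
    # then a linear decay down to 0.1 at the final step.
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

assert example_lr_multiplier(0) == 1.0                 # stable phase covers steps 0-1999
assert abs(example_lr_multiplier(6000) - 0.55) < 1e-9  # halfway through the cooldown
assert abs(example_lr_multiplier(10000) - 0.1) < 1e-9  # end of training

tokens_per_train_step = 4 * 12288           # world_size * train_seq_len = 49152 tokens
full_val_batches = 1966080 // (4 * 65536)   # 7 full batches of 262144 tokens; the remaining
                                            # 131072 tokens are skipped, which is what the
                                            # "not perfectly divisible" warning refers to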
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:07:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:07:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:07:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:07:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:07:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:07:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:07:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:07:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:07:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:07:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:07:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:07:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:07:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:07:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:07:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:07:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:07:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:07:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:07:24] [Rank 0] PRINT: Model returns: +[2025-07-05 09:07:24] [Rank 0] PRINT: Model returns: +[2025-07-05 09:07:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:07:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:07:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:07:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:07:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:07:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:07:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:07:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:07:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:07:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:07:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:07:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:07:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:07:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:07:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:07:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:08:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:08:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:08:53] [Rank 0] PRINT: Starting training... +[2025-07-05 09:08:53] [Rank 0] PRINT: Starting training... +[2025-07-05 09:08:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:08:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
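For reading the detailed-evaluation output that begins at step 500 below, the per-group numbers follow the power-law grouping built by generate_powerlaw_selection_counts with the M_FOR_POWERLAW = 11 set in the script above. The snippet below is a standalone illustration of that grouping, not part of the logged script.

groups = {}
for g in range(12):                         # groups 0..11 for m = 11
    n_classes = 1 if g == 0 else 2 ** (g - 1)
    per_class = 2 ** (11 - g)               # QA samples per class in this group
    groups[g] = (n_classes, per_class)
# groups[0] == (1, 2048), groups[1] == (1, 1024), groups[2] == (2, 512), ..., groups[11] == (1024, 1)
# Every group g >= 1 holds 2**(g-1) * 2**(11-g) = 1024 samples in total, so higher group ids
# correspond to rarer classes (fewer samples per class).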
+[2025-07-05 09:09:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:09:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:09:03] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.45ms +[2025-07-05 09:09:03] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.45ms +[2025-07-05 09:09:04] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.20ms +[2025-07-05 09:09:04] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.20ms +[2025-07-05 09:09:06] [Rank 0] step:61/10000 train_time:4658ms step_avg:76.36ms +[2025-07-05 09:09:06] [Rank 0] step:61/10000 train_time:4658ms step_avg:76.36ms +[2025-07-05 09:09:07] [Rank 0] step:81/10000 train_time:6110ms step_avg:75.43ms +[2025-07-05 09:09:07] [Rank 0] step:81/10000 train_time:6110ms step_avg:75.43ms +[2025-07-05 09:09:09] [Rank 0] step:101/10000 train_time:7802ms step_avg:77.24ms +[2025-07-05 09:09:09] [Rank 0] step:101/10000 train_time:7802ms step_avg:77.24ms +[2025-07-05 09:09:10] [Rank 0] step:121/10000 train_time:9256ms step_avg:76.50ms +[2025-07-05 09:09:10] [Rank 0] step:121/10000 train_time:9256ms step_avg:76.50ms +[2025-07-05 09:09:12] [Rank 0] step:141/10000 train_time:10712ms step_avg:75.97ms +[2025-07-05 09:09:12] [Rank 0] step:141/10000 train_time:10712ms step_avg:75.97ms +[2025-07-05 09:09:13] [Rank 0] step:161/10000 train_time:12169ms step_avg:75.58ms +[2025-07-05 09:09:13] [Rank 0] step:161/10000 train_time:12169ms step_avg:75.58ms +[2025-07-05 09:09:15] [Rank 0] step:181/10000 train_time:13681ms step_avg:75.58ms +[2025-07-05 09:09:15] [Rank 0] step:181/10000 train_time:13681ms step_avg:75.58ms +[2025-07-05 09:09:16] [Rank 0] step:201/10000 train_time:15322ms step_avg:76.23ms +[2025-07-05 09:09:16] [Rank 0] step:201/10000 train_time:15322ms step_avg:76.23ms +[2025-07-05 09:09:18] [Rank 0] step:221/10000 train_time:16781ms step_avg:75.93ms +[2025-07-05 09:09:18] [Rank 0] step:221/10000 train_time:16781ms step_avg:75.93ms +[2025-07-05 09:09:19] [Rank 0] step:241/10000 train_time:18242ms step_avg:75.69ms +[2025-07-05 09:09:19] [Rank 0] step:241/10000 train_time:18242ms step_avg:75.69ms +[2025-07-05 09:09:21] [Rank 0] step:261/10000 train_time:19703ms step_avg:75.49ms +[2025-07-05 09:09:21] [Rank 0] step:261/10000 train_time:19703ms step_avg:75.49ms +[2025-07-05 09:09:22] [Rank 0] step:281/10000 train_time:21402ms step_avg:76.17ms +[2025-07-05 09:09:22] [Rank 0] step:281/10000 train_time:21402ms step_avg:76.17ms +[2025-07-05 09:09:24] [Rank 0] step:301/10000 train_time:22864ms step_avg:75.96ms +[2025-07-05 09:09:24] [Rank 0] step:301/10000 train_time:22864ms step_avg:75.96ms +[2025-07-05 09:09:25] [Rank 0] step:321/10000 train_time:24328ms step_avg:75.79ms +[2025-07-05 09:09:25] [Rank 0] step:321/10000 train_time:24328ms step_avg:75.79ms +[2025-07-05 09:09:27] [Rank 0] step:341/10000 train_time:25793ms step_avg:75.64ms +[2025-07-05 09:09:27] [Rank 0] step:341/10000 train_time:25793ms step_avg:75.64ms +[2025-07-05 09:09:28] [Rank 0] step:361/10000 train_time:27308ms step_avg:75.65ms +[2025-07-05 09:09:28] [Rank 0] step:361/10000 train_time:27308ms step_avg:75.65ms +[2025-07-05 09:09:30] [Rank 0] step:381/10000 train_time:28958ms step_avg:76.01ms +[2025-07-05 09:09:30] [Rank 0] step:381/10000 train_time:28958ms step_avg:76.01ms +[2025-07-05 09:09:31] [Rank 0] step:401/10000 train_time:30424ms step_avg:75.87ms +[2025-07-05 09:09:31] [Rank 0] step:401/10000 train_time:30424ms step_avg:75.87ms +[2025-07-05 09:09:33] [Rank 0] step:421/10000 train_time:31892ms step_avg:75.75ms 
+[2025-07-05 09:09:33] [Rank 0] step:421/10000 train_time:31892ms step_avg:75.75ms +[2025-07-05 09:09:34] [Rank 0] step:441/10000 train_time:33357ms step_avg:75.64ms +[2025-07-05 09:09:34] [Rank 0] step:441/10000 train_time:33357ms step_avg:75.64ms +[2025-07-05 09:09:36] [Rank 0] step:461/10000 train_time:35061ms step_avg:76.05ms +[2025-07-05 09:09:36] [Rank 0] step:461/10000 train_time:35061ms step_avg:76.05ms +[2025-07-05 09:09:37] [Rank 0] step:481/10000 train_time:36528ms step_avg:75.94ms +[2025-07-05 09:09:37] [Rank 0] step:481/10000 train_time:36528ms step_avg:75.94ms +[2025-07-05 09:09:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:09:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:09:40] [Rank 0] PRINT: step:500/10000 train_loss:6.8651 val_loss:4.3919 train_time:37995ms step_avg:75.99ms +[2025-07-05 09:09:40] [Rank 0] PRINT: step:500/10000 train_loss:6.8651 val_loss:4.3919 train_time:37995ms step_avg:75.99ms +[2025-07-05 09:09:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:09:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a67968b8713d4e59876ba4120bfd52875d55f97e --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "72efacfa-0471-4b59-82cc-b4cfcb082633", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/training_log_72efacfa-0471-4b59-82cc-b4cfcb082633.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/training_log_72efacfa-0471-4b59-82cc-b4cfcb082633.txt new file mode 100644 index 0000000000000000000000000000000000000000..deded2cc0775ba86715e1db7ed8f0bf58a66330a --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/training_log_72efacfa-0471-4b59-82cc-b4cfcb082633.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:34:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:34:45 2025 --- +[2025-07-05 09:34:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:34:45 2025 --- +[2025-07-05 09:34:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:34:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:34:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:34:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:34:45] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 
09:34:45] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:34:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45 +[2025-07-05 09:34:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45 +[2025-07-05 09:34:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
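+# A minimal sketch of the stable-then-decay multiplier that get_lr() above applies to
+# every param group's initial_lr, assuming the logged hyperparameters
+# (num_iterations=10000, cooldown_frac=0.8): the multiplier holds at 1.0 for the first
+# 20% of steps, then decays linearly from 1.0 down to 0.1 over the remaining 80%.
+def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamp progress, matching get_lr's adjusted assert
+    if x < 1 - cooldown_frac:
+        return 1.0
+    w = (1 - x) / max(cooldown_frac, 1e-9)
+    return w * 1.0 + (1 - w) * 0.1
+# e.g. _lr_multiplier_sketch(0) == 1.0, _lr_multiplier_sketch(6000) == 0.55,
+# _lr_multiplier_sketch(10000) == 0.1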
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:34:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
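+# For the CLI args recorded in this run's config.json (optimizer_mode=0,
+# model_parameterization="qkvo", adam_lr=0.0001, seed=42), the pattern above resolves to
+# "mode_0_param_qkvo_lr_0.0001_seed_42", matching the directory this log was written to.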
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
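+# Accumulators for the reported training loss: each training step adds
+# loss.detach() / args.train_seq_len to train_loss_sum and increments
+# train_step_count; at every validation the quotient is all-reduced (averaged)
+# across ranks and both accumulators are reset to zero.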
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:34:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:34:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:34:46] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:34:46] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:34:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:34:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:34:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:34:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:34:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:34:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:34:48] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:34:48] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:34:48] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:34:48] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:34:48] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:34:48] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:34:48] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:34:48] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:34:48] [Rank 0] PRINT: Model returns: +[2025-07-05 09:34:48] [Rank 0] PRINT: Model returns: +[2025-07-05 09:34:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:34:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:34:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:34:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:34:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:34:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:34:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:34:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:34:49] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:34:49] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:34:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:34:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:34:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:34:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:34:49] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:34:49] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:35:54] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:35:54] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:35:54] [Rank 0] PRINT: Starting training... +[2025-07-05 09:35:54] [Rank 0] PRINT: Starting training... +[2025-07-05 09:35:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:35:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:36:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:36:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:36:03] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.43ms +[2025-07-05 09:36:03] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.43ms +[2025-07-05 09:36:05] [Rank 0] step:41/10000 train_time:3210ms step_avg:78.29ms +[2025-07-05 09:36:05] [Rank 0] step:41/10000 train_time:3210ms step_avg:78.29ms +[2025-07-05 09:36:06] [Rank 0] step:61/10000 train_time:4667ms step_avg:76.51ms +[2025-07-05 09:36:06] [Rank 0] step:61/10000 train_time:4667ms step_avg:76.51ms +[2025-07-05 09:36:08] [Rank 0] step:81/10000 train_time:6124ms step_avg:75.61ms +[2025-07-05 09:36:08] [Rank 0] step:81/10000 train_time:6124ms step_avg:75.61ms +[2025-07-05 09:36:09] [Rank 0] step:101/10000 train_time:7620ms step_avg:75.45ms +[2025-07-05 09:36:09] [Rank 0] step:101/10000 train_time:7620ms step_avg:75.45ms +[2025-07-05 09:36:11] [Rank 0] step:121/10000 train_time:9080ms step_avg:75.04ms +[2025-07-05 09:36:11] [Rank 0] step:121/10000 train_time:9080ms step_avg:75.04ms +[2025-07-05 09:36:12] [Rank 0] step:141/10000 train_time:10674ms step_avg:75.70ms +[2025-07-05 09:36:12] [Rank 0] step:141/10000 train_time:10674ms step_avg:75.70ms +[2025-07-05 09:36:14] [Rank 0] step:161/10000 train_time:12198ms step_avg:75.77ms +[2025-07-05 09:36:14] [Rank 0] step:161/10000 train_time:12198ms step_avg:75.77ms +[2025-07-05 09:36:16] [Rank 0] step:181/10000 train_time:13848ms step_avg:76.51ms +[2025-07-05 09:36:16] [Rank 0] step:181/10000 train_time:13848ms step_avg:76.51ms +[2025-07-05 09:36:17] [Rank 0] step:201/10000 train_time:15496ms step_avg:77.09ms +[2025-07-05 09:36:17] [Rank 0] step:201/10000 train_time:15496ms step_avg:77.09ms +[2025-07-05 09:36:18] [Rank 0] step:221/10000 train_time:16961ms step_avg:76.75ms +[2025-07-05 09:36:18] [Rank 0] step:221/10000 train_time:16961ms step_avg:76.75ms +[2025-07-05 09:36:20] [Rank 0] step:241/10000 train_time:18527ms step_avg:76.88ms +[2025-07-05 09:36:20] [Rank 0] step:241/10000 train_time:18527ms step_avg:76.88ms +[2025-07-05 09:36:21] [Rank 0] step:261/10000 train_time:19994ms step_avg:76.61ms +[2025-07-05 09:36:21] [Rank 0] step:261/10000 train_time:19994ms step_avg:76.61ms +[2025-07-05 09:36:23] [Rank 0] step:281/10000 train_time:21695ms step_avg:77.21ms +[2025-07-05 09:36:23] [Rank 0] step:281/10000 train_time:21695ms step_avg:77.21ms +[2025-07-05 09:36:25] [Rank 0] step:301/10000 train_time:23165ms step_avg:76.96ms +[2025-07-05 09:36:25] [Rank 0] step:301/10000 train_time:23165ms step_avg:76.96ms +[2025-07-05 09:36:26] [Rank 0] step:321/10000 train_time:24637ms step_avg:76.75ms +[2025-07-05 09:36:26] [Rank 0] step:321/10000 train_time:24637ms step_avg:76.75ms +[2025-07-05 09:36:28] [Rank 0] step:341/10000 train_time:26106ms step_avg:76.56ms +[2025-07-05 09:36:28] [Rank 0] step:341/10000 train_time:26106ms step_avg:76.56ms +[2025-07-05 09:36:29] [Rank 0] step:361/10000 train_time:27628ms step_avg:76.53ms +[2025-07-05 09:36:29] [Rank 0] step:361/10000 train_time:27628ms step_avg:76.53ms +[2025-07-05 09:36:31] [Rank 0] step:381/10000 train_time:29081ms step_avg:76.33ms +[2025-07-05 09:36:31] [Rank 0] step:381/10000 train_time:29081ms step_avg:76.33ms +[2025-07-05 09:36:32] [Rank 0] step:401/10000 train_time:30554ms step_avg:76.20ms +[2025-07-05 09:36:32] [Rank 0] step:401/10000 train_time:30554ms step_avg:76.20ms +[2025-07-05 09:36:34] [Rank 0] step:421/10000 train_time:32026ms step_avg:76.07ms 
+[2025-07-05 09:36:34] [Rank 0] step:421/10000 train_time:32026ms step_avg:76.07ms +[2025-07-05 09:36:35] [Rank 0] step:441/10000 train_time:33499ms step_avg:75.96ms +[2025-07-05 09:36:35] [Rank 0] step:441/10000 train_time:33499ms step_avg:75.96ms +[2025-07-05 09:36:37] [Rank 0] step:461/10000 train_time:35206ms step_avg:76.37ms +[2025-07-05 09:36:37] [Rank 0] step:461/10000 train_time:35206ms step_avg:76.37ms +[2025-07-05 09:36:38] [Rank 0] step:481/10000 train_time:36681ms step_avg:76.26ms +[2025-07-05 09:36:38] [Rank 0] step:481/10000 train_time:36681ms step_avg:76.26ms +[2025-07-05 09:36:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:36:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:36:41] [Rank 0] PRINT: step:500/10000 train_loss:6.8671 val_loss:4.3919 train_time:38153ms step_avg:76.31ms +[2025-07-05 09:36:41] [Rank 0] PRINT: step:500/10000 train_loss:6.8671 val_loss:4.3919 train_time:38153ms step_avg:76.31ms +[2025-07-05 09:36:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:36:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..730fd30f3813c1f0746f1e7e1b88007ce1a1ec16 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "97224b74-aa73-4010-908b-dfe728321b89", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/training_log_97224b74-aa73-4010-908b-dfe728321b89.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/training_log_97224b74-aa73-4010-908b-dfe728321b89.txt new file mode 100644 index 0000000000000000000000000000000000000000..255d81a6f245278552e03f4ee2d0fad470e9154a --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/training_log_97224b74-aa73-4010-908b-dfe728321b89.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:02:02] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:02:02 2025 --- +[2025-07-05 10:02:02] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:02:02 2025 --- +[2025-07-05 10:02:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:02:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:02:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:02:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:02:03] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 
10:02:03] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:02:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46 +[2025-07-05 10:02:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46 +[2025-07-05 10:02:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
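+# Note: train_loss_sum and train_step_count accumulate the per-step training loss between validation checkpoints; both are reset to zero after every eval block below.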
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:02:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
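+# Provisional absolute run directory (set on all ranks); the master process re-derives it under base_log_dir below, creates it, and writes config.json there.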
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
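+# Worked values for the two schedules defined above (illustrative only; assumes the
+# Hyperparameters defaults num_iterations=10000 and cooldown_frac=0.8 used in this run):
+#   get_lr(0)     -> 1.0    # stable phase, x = 0.0 < 1 - cooldown_frac
+#   get_lr(2000)  -> 1.0    # x = 0.2, w = (1 - 0.2) / 0.8 = 1.0
+#   get_lr(6000)  -> 0.55   # x = 0.6, w = 0.5, so 0.5 * 1.0 + 0.5 * 0.1
+#   get_lr(10000) -> 0.1    # final step, x clamped to 1.0
+#   get_window_size_blocks(0)     -> 1 block   (128-token window)
+#   get_window_size_blocks(5000)  -> 7 blocks  (1728 * 0.5 = 864, rounded up to 896)
+#   get_window_size_blocks(10000) -> 14 blocks (1728 rounded up to 1792)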
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:02:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:02:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:02:03] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:02:03] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:02:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:02:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:02:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:02:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:02:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:02:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:02:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:02:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:02:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:02:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:02:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:02:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:02:06] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:02:06] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:02:06] [Rank 0] PRINT: Model returns: +[2025-07-05 10:02:06] [Rank 0] PRINT: Model returns: +[2025-07-05 10:02:06] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:02:06] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:02:06] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:02:06] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:02:06] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:02:06] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:02:06] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:02:06] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:02:06] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:02:06] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:02:06] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:02:06] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:02:06] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:02:06] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:02:06] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:02:06] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:03:11] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:03:11] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:03:12] [Rank 0] PRINT: Starting training... +[2025-07-05 10:03:12] [Rank 0] PRINT: Starting training... +[2025-07-05 10:03:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:03:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:03:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:03:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:03:20] [Rank 0] step:21/10000 train_time:1551ms step_avg:73.84ms +[2025-07-05 10:03:20] [Rank 0] step:21/10000 train_time:1551ms step_avg:73.84ms +[2025-07-05 10:03:22] [Rank 0] step:41/10000 train_time:3004ms step_avg:73.27ms +[2025-07-05 10:03:22] [Rank 0] step:41/10000 train_time:3004ms step_avg:73.27ms +[2025-07-05 10:03:23] [Rank 0] step:61/10000 train_time:4458ms step_avg:73.08ms +[2025-07-05 10:03:23] [Rank 0] step:61/10000 train_time:4458ms step_avg:73.08ms +[2025-07-05 10:03:25] [Rank 0] step:81/10000 train_time:5913ms step_avg:73.00ms +[2025-07-05 10:03:25] [Rank 0] step:81/10000 train_time:5913ms step_avg:73.00ms +[2025-07-05 10:03:26] [Rank 0] step:101/10000 train_time:7605ms step_avg:75.30ms +[2025-07-05 10:03:26] [Rank 0] step:101/10000 train_time:7605ms step_avg:75.30ms +[2025-07-05 10:03:28] [Rank 0] step:121/10000 train_time:9061ms step_avg:74.88ms +[2025-07-05 10:03:28] [Rank 0] step:121/10000 train_time:9061ms step_avg:74.88ms +[2025-07-05 10:03:29] [Rank 0] step:141/10000 train_time:10516ms step_avg:74.58ms +[2025-07-05 10:03:29] [Rank 0] step:141/10000 train_time:10516ms step_avg:74.58ms +[2025-07-05 10:03:31] [Rank 0] step:161/10000 train_time:11973ms step_avg:74.37ms +[2025-07-05 10:03:31] [Rank 0] step:161/10000 train_time:11973ms step_avg:74.37ms +[2025-07-05 10:03:32] [Rank 0] step:181/10000 train_time:13686ms step_avg:75.61ms +[2025-07-05 10:03:32] [Rank 0] step:181/10000 train_time:13686ms step_avg:75.61ms +[2025-07-05 10:03:34] [Rank 0] step:201/10000 train_time:15126ms step_avg:75.25ms +[2025-07-05 10:03:34] [Rank 0] step:201/10000 train_time:15126ms step_avg:75.25ms +[2025-07-05 10:03:35] [Rank 0] step:221/10000 train_time:16588ms step_avg:75.06ms +[2025-07-05 10:03:35] [Rank 0] step:221/10000 train_time:16588ms step_avg:75.06ms +[2025-07-05 10:03:37] [Rank 0] step:241/10000 train_time:18291ms step_avg:75.90ms +[2025-07-05 10:03:37] [Rank 0] step:241/10000 train_time:18291ms step_avg:75.90ms +[2025-07-05 10:03:39] [Rank 0] step:261/10000 train_time:19821ms step_avg:75.94ms +[2025-07-05 10:03:39] [Rank 0] step:261/10000 train_time:19821ms step_avg:75.94ms +[2025-07-05 10:03:40] [Rank 0] step:281/10000 train_time:21560ms step_avg:76.72ms +[2025-07-05 10:03:40] [Rank 0] step:281/10000 train_time:21560ms step_avg:76.72ms +[2025-07-05 10:03:42] [Rank 0] step:301/10000 train_time:23023ms step_avg:76.49ms +[2025-07-05 10:03:42] [Rank 0] step:301/10000 train_time:23023ms step_avg:76.49ms +[2025-07-05 10:03:43] [Rank 0] step:321/10000 train_time:24490ms step_avg:76.29ms +[2025-07-05 10:03:43] [Rank 0] step:321/10000 train_time:24490ms step_avg:76.29ms +[2025-07-05 10:03:45] [Rank 0] step:341/10000 train_time:25955ms step_avg:76.12ms +[2025-07-05 10:03:45] [Rank 0] step:341/10000 train_time:25955ms step_avg:76.12ms +[2025-07-05 10:03:46] [Rank 0] step:361/10000 train_time:27424ms step_avg:75.97ms +[2025-07-05 10:03:46] [Rank 0] step:361/10000 train_time:27424ms step_avg:75.97ms +[2025-07-05 10:03:48] [Rank 0] step:381/10000 train_time:29126ms step_avg:76.45ms +[2025-07-05 10:03:48] [Rank 0] step:381/10000 train_time:29126ms step_avg:76.45ms +[2025-07-05 10:03:49] [Rank 0] step:401/10000 train_time:30595ms step_avg:76.30ms +[2025-07-05 10:03:49] [Rank 0] step:401/10000 train_time:30595ms step_avg:76.30ms +[2025-07-05 10:03:51] [Rank 0] step:421/10000 train_time:32063ms step_avg:76.16ms 
+[2025-07-05 10:03:51] [Rank 0] step:421/10000 train_time:32063ms step_avg:76.16ms +[2025-07-05 10:03:52] [Rank 0] step:441/10000 train_time:33532ms step_avg:76.04ms +[2025-07-05 10:03:52] [Rank 0] step:441/10000 train_time:33532ms step_avg:76.04ms +[2025-07-05 10:03:54] [Rank 0] step:461/10000 train_time:35646ms step_avg:77.32ms +[2025-07-05 10:03:54] [Rank 0] step:461/10000 train_time:35646ms step_avg:77.32ms +[2025-07-05 10:03:56] [Rank 0] step:481/10000 train_time:37118ms step_avg:77.17ms +[2025-07-05 10:03:56] [Rank 0] step:481/10000 train_time:37118ms step_avg:77.17ms +[2025-07-05 10:03:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:03:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:03:58] [Rank 0] PRINT: step:500/10000 train_loss:6.8630 val_loss:4.3860 train_time:38586ms step_avg:77.17ms +[2025-07-05 10:03:58] [Rank 0] PRINT: step:500/10000 train_loss:6.8630 val_loss:4.3860 train_time:38586ms step_avg:77.17ms +[2025-07-05 10:03:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:03:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..11b1da7c2891a9229879f3c6e5685d0e5464d560 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "2292ebaa-777e-4bc7-aef0-096fd1e21bfb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47/training_log_2292ebaa-777e-4bc7-aef0-096fd1e21bfb.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47/training_log_2292ebaa-777e-4bc7-aef0-096fd1e21bfb.txt new file mode 100644 index 0000000000000000000000000000000000000000..0f89d3285838168463e2f54b11cfd5feae435c36 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47/training_log_2292ebaa-777e-4bc7-aef0-096fd1e21bfb.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:21:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:21:18 2025 --- +[2025-07-05 08:21:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:21:18 2025 --- +[2025-07-05 08:21:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:21:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:21:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:21:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:21:18] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 
08:21:18] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:21:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47 +[2025-07-05 08:21:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_47 +[2025-07-05 08:21:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
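+    Arguments as consumed below (noted here for clarity): `qa_data_path` is a JSONL file
+    whose rows carry 'text' and 'class_id'; `m_val` feeds generate_powerlaw_selection_counts
+    to map class ids to groups; `num_samples`, when set, caps the evaluation set via
+    per-class stratified sampling.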
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:21:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:21:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:21:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:21:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:21:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:21:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:21:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:21:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:21:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:21:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:21:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:21:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:21:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:21:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:21:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:21:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:21:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:21:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:21:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:21:22] [Rank 0] PRINT: Model returns: +[2025-07-05 08:21:22] [Rank 0] PRINT: Model returns: +[2025-07-05 08:21:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:21:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:21:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:21:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:21:22] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:21:22] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:21:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:21:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:21:22] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:21:22] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:21:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:21:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:21:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:21:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:21:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:21:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:22:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:22:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:22:27] [Rank 0] PRINT: Starting training... +[2025-07-05 08:22:27] [Rank 0] PRINT: Starting training... +[2025-07-05 08:22:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:22:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:22:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:22:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:22:36] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-05 08:22:36] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-05 08:22:37] [Rank 0] step:41/10000 train_time:3001ms step_avg:73.19ms +[2025-07-05 08:22:37] [Rank 0] step:41/10000 train_time:3001ms step_avg:73.19ms +[2025-07-05 08:22:39] [Rank 0] step:61/10000 train_time:4453ms step_avg:73.00ms +[2025-07-05 08:22:39] [Rank 0] step:61/10000 train_time:4453ms step_avg:73.00ms +[2025-07-05 08:22:40] [Rank 0] step:81/10000 train_time:5902ms step_avg:72.86ms +[2025-07-05 08:22:40] [Rank 0] step:81/10000 train_time:5902ms step_avg:72.86ms +[2025-07-05 08:22:42] [Rank 0] step:101/10000 train_time:8019ms step_avg:79.40ms +[2025-07-05 08:22:42] [Rank 0] step:101/10000 train_time:8019ms step_avg:79.40ms +[2025-07-05 08:22:44] [Rank 0] step:121/10000 train_time:9470ms step_avg:78.27ms +[2025-07-05 08:22:44] [Rank 0] step:121/10000 train_time:9470ms step_avg:78.27ms +[2025-07-05 08:22:45] [Rank 0] step:141/10000 train_time:10923ms step_avg:77.47ms +[2025-07-05 08:22:45] [Rank 0] step:141/10000 train_time:10923ms step_avg:77.47ms +[2025-07-05 08:22:47] [Rank 0] step:161/10000 train_time:12375ms step_avg:76.86ms +[2025-07-05 08:22:47] [Rank 0] step:161/10000 train_time:12375ms step_avg:76.86ms +[2025-07-05 08:22:48] [Rank 0] step:181/10000 train_time:13828ms step_avg:76.40ms +[2025-07-05 08:22:48] [Rank 0] step:181/10000 train_time:13828ms step_avg:76.40ms +[2025-07-05 08:22:50] [Rank 0] step:201/10000 train_time:15518ms step_avg:77.20ms +[2025-07-05 08:22:50] [Rank 0] step:201/10000 train_time:15518ms step_avg:77.20ms +[2025-07-05 08:22:51] [Rank 0] step:221/10000 train_time:16973ms step_avg:76.80ms +[2025-07-05 08:22:51] [Rank 0] step:221/10000 train_time:16973ms step_avg:76.80ms +[2025-07-05 08:22:53] [Rank 0] step:241/10000 train_time:18431ms step_avg:76.48ms +[2025-07-05 08:22:53] [Rank 0] step:241/10000 train_time:18431ms step_avg:76.48ms +[2025-07-05 08:22:54] [Rank 0] step:261/10000 train_time:19890ms step_avg:76.21ms +[2025-07-05 08:22:54] [Rank 0] step:261/10000 train_time:19890ms step_avg:76.21ms +[2025-07-05 08:22:56] [Rank 0] step:281/10000 train_time:22003ms step_avg:78.30ms +[2025-07-05 08:22:56] [Rank 0] step:281/10000 train_time:22003ms step_avg:78.30ms +[2025-07-05 08:22:58] [Rank 0] step:301/10000 train_time:23463ms step_avg:77.95ms +[2025-07-05 08:22:58] [Rank 0] step:301/10000 train_time:23463ms step_avg:77.95ms +[2025-07-05 08:22:59] [Rank 0] step:321/10000 train_time:24926ms step_avg:77.65ms +[2025-07-05 08:22:59] [Rank 0] step:321/10000 train_time:24926ms step_avg:77.65ms +[2025-07-05 08:23:01] [Rank 0] step:341/10000 train_time:26389ms step_avg:77.39ms +[2025-07-05 08:23:01] [Rank 0] step:341/10000 train_time:26389ms step_avg:77.39ms +[2025-07-05 08:23:03] [Rank 0] step:361/10000 train_time:27852ms step_avg:77.15ms +[2025-07-05 08:23:03] [Rank 0] step:361/10000 train_time:27852ms step_avg:77.15ms +[2025-07-05 08:23:04] [Rank 0] step:381/10000 train_time:29971ms step_avg:78.66ms +[2025-07-05 08:23:04] [Rank 0] step:381/10000 train_time:29971ms step_avg:78.66ms +[2025-07-05 08:23:06] [Rank 0] step:401/10000 train_time:31436ms step_avg:78.39ms +[2025-07-05 08:23:06] [Rank 0] step:401/10000 train_time:31436ms step_avg:78.39ms +[2025-07-05 08:23:07] [Rank 0] step:421/10000 train_time:32900ms step_avg:78.15ms 
+[2025-07-05 08:23:07] [Rank 0] step:421/10000 train_time:32900ms step_avg:78.15ms +[2025-07-05 08:23:09] [Rank 0] step:441/10000 train_time:34365ms step_avg:77.92ms +[2025-07-05 08:23:09] [Rank 0] step:441/10000 train_time:34365ms step_avg:77.92ms +[2025-07-05 08:23:11] [Rank 0] step:461/10000 train_time:36469ms step_avg:79.11ms +[2025-07-05 08:23:11] [Rank 0] step:461/10000 train_time:36469ms step_avg:79.11ms +[2025-07-05 08:23:12] [Rank 0] step:481/10000 train_time:37935ms step_avg:78.87ms +[2025-07-05 08:23:12] [Rank 0] step:481/10000 train_time:37935ms step_avg:78.87ms +[2025-07-05 08:23:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:23:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:23:14] [Rank 0] PRINT: step:500/10000 train_loss:6.8638 val_loss:4.3882 train_time:39399ms step_avg:78.80ms +[2025-07-05 08:23:14] [Rank 0] PRINT: step:500/10000 train_loss:6.8638 val_loss:4.3882 train_time:39399ms step_avg:78.80ms +[2025-07-05 08:23:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:23:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..16246abdfde819d1626508083970035400c9386f --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "75249436-d1f0-4cae-9df5-66500d44d3d5", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/training_log_75249436-d1f0-4cae-9df5-66500d44d3d5.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/training_log_75249436-d1f0-4cae-9df5-66500d44d3d5.txt new file mode 100644 index 0000000000000000000000000000000000000000..a185713220c0981320764f2ba67fb0e08ce1a75d --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/training_log_75249436-d1f0-4cae-9df5-66500d44d3d5.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:48:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:48:43 2025 --- +[2025-07-05 08:48:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:48:43 2025 --- +[2025-07-05 08:48:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:48:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:48:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:48:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:48:43] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 
08:48:43] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:48:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48 +[2025-07-05 08:48:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48 +[2025-07-05 08:48:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
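+# --- Editor's note (illustrative, not part of the logged run) ---
+# Under the logged hyperparameters (num_iterations=10000, cooldown_frac=0.8), the
+# get_lr() schedule above returns a multiplier of 1.0 for the first 20% of steps and
+# then decays linearly down to 0.1 at the final step, while get_window_size_blocks()
+# grows the attention window from 1 block (128 tokens) at step 0 to 14 blocks
+# (1792 tokens) at step 10000. A hypothetical sanity check (not executed in this run)
+# could print the two schedules at a few milestones:
+# for s in (0, 2000, 6000, 10000):
+#     print0(f"PRINT: schedule check step={s} lr_mult={get_lr(s):.2f} "
+#            f"window_blocks={int(get_window_size_blocks(s).item())}", console=True)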
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:48:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
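+# A minimal standalone sketch of the "stable then decay" schedule that get_lr above
+# implements, assuming this run's config values (num_iterations=10000, cooldown_frac=0.8).
+# The helper name is illustrative only and is not used elsewhere in the script: the
+# multiplier stays at 1.0 for the first 20% of training, then ramps linearly down to 0.1.
+def _sketch_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped training progress, as in get_lr
+    if x < 1 - cooldown_frac:
+        return 1.0
+    w = (1 - x) / cooldown_frac  # fraction of the cooldown still remaining
+    return w * 1.0 + (1 - w) * 0.1
+# e.g. _sketch_lr_multiplier(1000) -> 1.0, _sketch_lr_multiplier(6000) -> ~0.55,
+# _sketch_lr_multiplier(10000) -> 0.1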
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:48:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:48:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:48:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:48:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:48:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:48:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:48:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:48:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:48:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:48:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:48:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:48:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:48:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:48:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:48:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:48:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:48:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:48:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:48:46] [Rank 0] PRINT: Model returns: +[2025-07-05 08:48:46] [Rank 0] PRINT: Model returns: +[2025-07-05 08:48:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:48:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:48:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:48:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:48:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:48:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:48:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:48:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:48:46] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:48:46] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:48:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:48:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:48:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:48:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:48:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:48:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:49:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:49:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:49:54] [Rank 0] PRINT: Starting training... +[2025-07-05 08:49:54] [Rank 0] PRINT: Starting training... +[2025-07-05 08:49:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:49:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:50:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:50:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:50:02] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.45ms +[2025-07-05 08:50:02] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.45ms +[2025-07-05 08:50:04] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.19ms +[2025-07-05 08:50:04] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.19ms +[2025-07-05 08:50:05] [Rank 0] step:61/10000 train_time:4660ms step_avg:76.40ms +[2025-07-05 08:50:05] [Rank 0] step:61/10000 train_time:4660ms step_avg:76.40ms +[2025-07-05 08:50:07] [Rank 0] step:81/10000 train_time:6117ms step_avg:75.51ms +[2025-07-05 08:50:07] [Rank 0] step:81/10000 train_time:6117ms step_avg:75.51ms +[2025-07-05 08:50:09] [Rank 0] step:101/10000 train_time:7815ms step_avg:77.37ms +[2025-07-05 08:50:09] [Rank 0] step:101/10000 train_time:7815ms step_avg:77.37ms +[2025-07-05 08:50:10] [Rank 0] step:121/10000 train_time:9272ms step_avg:76.63ms +[2025-07-05 08:50:10] [Rank 0] step:121/10000 train_time:9272ms step_avg:76.63ms +[2025-07-05 08:50:11] [Rank 0] step:141/10000 train_time:10729ms step_avg:76.09ms +[2025-07-05 08:50:11] [Rank 0] step:141/10000 train_time:10729ms step_avg:76.09ms +[2025-07-05 08:50:13] [Rank 0] step:161/10000 train_time:12188ms step_avg:75.70ms +[2025-07-05 08:50:13] [Rank 0] step:161/10000 train_time:12188ms step_avg:75.70ms +[2025-07-05 08:50:15] [Rank 0] step:181/10000 train_time:14330ms step_avg:79.17ms +[2025-07-05 08:50:15] [Rank 0] step:181/10000 train_time:14330ms step_avg:79.17ms +[2025-07-05 08:50:17] [Rank 0] step:201/10000 train_time:15771ms step_avg:78.46ms +[2025-07-05 08:50:17] [Rank 0] step:201/10000 train_time:15771ms step_avg:78.46ms +[2025-07-05 08:50:18] [Rank 0] step:221/10000 train_time:17231ms step_avg:77.97ms +[2025-07-05 08:50:18] [Rank 0] step:221/10000 train_time:17231ms step_avg:77.97ms +[2025-07-05 08:50:19] [Rank 0] step:241/10000 train_time:18695ms step_avg:77.57ms +[2025-07-05 08:50:19] [Rank 0] step:241/10000 train_time:18695ms step_avg:77.57ms +[2025-07-05 08:50:21] [Rank 0] step:261/10000 train_time:20159ms step_avg:77.24ms +[2025-07-05 08:50:21] [Rank 0] step:261/10000 train_time:20159ms step_avg:77.24ms +[2025-07-05 08:50:23] [Rank 0] step:281/10000 train_time:22270ms step_avg:79.25ms +[2025-07-05 08:50:23] [Rank 0] step:281/10000 train_time:22270ms step_avg:79.25ms +[2025-07-05 08:50:24] [Rank 0] step:301/10000 train_time:23733ms step_avg:78.85ms +[2025-07-05 08:50:24] [Rank 0] step:301/10000 train_time:23733ms step_avg:78.85ms +[2025-07-05 08:50:26] [Rank 0] step:321/10000 train_time:25201ms step_avg:78.51ms +[2025-07-05 08:50:26] [Rank 0] step:321/10000 train_time:25201ms step_avg:78.51ms +[2025-07-05 08:50:27] [Rank 0] step:341/10000 train_time:26670ms step_avg:78.21ms +[2025-07-05 08:50:27] [Rank 0] step:341/10000 train_time:26670ms step_avg:78.21ms +[2025-07-05 08:50:30] [Rank 0] step:361/10000 train_time:28831ms step_avg:79.86ms +[2025-07-05 08:50:30] [Rank 0] step:361/10000 train_time:28831ms step_avg:79.86ms +[2025-07-05 08:50:31] [Rank 0] step:381/10000 train_time:30273ms step_avg:79.46ms +[2025-07-05 08:50:31] [Rank 0] step:381/10000 train_time:30273ms step_avg:79.46ms +[2025-07-05 08:50:32] [Rank 0] step:401/10000 train_time:31743ms step_avg:79.16ms +[2025-07-05 08:50:32] [Rank 0] step:401/10000 train_time:31743ms step_avg:79.16ms +[2025-07-05 08:50:34] [Rank 0] step:421/10000 train_time:33209ms step_avg:78.88ms 
+[2025-07-05 08:50:34] [Rank 0] step:421/10000 train_time:33209ms step_avg:78.88ms +[2025-07-05 08:50:35] [Rank 0] step:441/10000 train_time:34679ms step_avg:78.64ms +[2025-07-05 08:50:35] [Rank 0] step:441/10000 train_time:34679ms step_avg:78.64ms +[2025-07-05 08:50:38] [Rank 0] step:461/10000 train_time:36800ms step_avg:79.83ms +[2025-07-05 08:50:38] [Rank 0] step:461/10000 train_time:36800ms step_avg:79.83ms +[2025-07-05 08:50:39] [Rank 0] step:481/10000 train_time:38273ms step_avg:79.57ms +[2025-07-05 08:50:39] [Rank 0] step:481/10000 train_time:38273ms step_avg:79.57ms +[2025-07-05 08:50:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:50:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:50:41] [Rank 0] PRINT: step:500/10000 train_loss:6.8669 val_loss:4.3937 train_time:39836ms step_avg:79.67ms +[2025-07-05 08:50:41] [Rank 0] PRINT: step:500/10000 train_loss:6.8669 val_loss:4.3937 train_time:39836ms step_avg:79.67ms +[2025-07-05 08:50:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:50:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6ae4831d062cf55455c4312ad3f010cc5c26a002 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "1622401c-ff59-46e6-a030-a7e119afd858", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49/training_log_1622401c-ff59-46e6-a030-a7e119afd858.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49/training_log_1622401c-ff59-46e6-a030-a7e119afd858.txt new file mode 100644 index 0000000000000000000000000000000000000000..1765c60447f22a03120a882006f3c0fcd2721782 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49/training_log_1622401c-ff59-46e6-a030-a7e119afd858.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:16:19] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:16:19 2025 --- +[2025-07-05 09:16:19] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:16:19 2025 --- +[2025-07-05 09:16:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:16:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:16:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:16:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:16:19] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 
09:16:19] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:16:19] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49 +[2025-07-05 09:16:19] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_49 +[2025-07-05 09:16:19] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
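+# Worked example of the two schedules defined above, using the Hyperparameters
+# defaults (num_iterations=10000, cooldown_frac=0.8); values follow directly from
+# get_lr and get_window_size_blocks:
+#   get_lr(1000)  -> x=0.10 < 0.20                -> LR multiplier 1.0
+#   get_lr(5000)  -> x=0.50, w=(1-0.5)/0.8=0.625  -> 0.625*1.0 + 0.375*0.1 = 0.6625
+#   get_lr(10000) -> x=1.00, w=0                  -> LR multiplier 0.1
+#   get_window_size_blocks(0)     -> 128-token window  -> 1 block
+#   get_window_size_blocks(5000)  -> 896-token window  -> 7 blocks
+#   get_window_size_blocks(10000) -> 1792-token window -> 14 blocks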
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:16:19] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
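+# Example launch command (sketch only; the script filename "train_qa.py" is a
+# placeholder). The DDP setup above reads RANK, LOCAL_RANK and WORLD_SIZE from the
+# environment, which torchrun provides:
+#   torchrun --standalone --nproc_per_node=8 train_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42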
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
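+        # Hypothetical illustration of this split: a cleaned record such as
+        # "What university did Alice attend? MIT" becomes
+        #     prompt = "What university did Alice attend?"
+        #     answer = "MIT", expected_token = tokenizer.encode(" MIT")[0]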
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
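# Illustrative worked values for the stable-then-decay schedule defined in get_lr above,
# assuming the logged hyperparameters (num_iterations=10000, cooldown_frac=0.8); the
# multiplier holds at 1.0 for the first 20% of steps, then decays linearly toward 0.1
# of each param group's initial_lr:
#   step     0  ->  x = 0.00  ->  multiplier 1.00
#   step  2000  ->  x = 0.20  ->  multiplier 1.00   (decay phase begins here)
#   step  6000  ->  x = 0.60  ->  multiplier 0.55
#   step 10000  ->  x = 1.00  ->  multiplier 0.10   (x is clamped to 1.0 on the final step)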
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:16:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:16:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:16:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:16:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:16:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:16:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:16:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:16:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:16:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:16:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:16:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:16:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:16:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:16:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:16:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:16:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:16:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:16:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:16:22] [Rank 0] PRINT: Model returns: +[2025-07-05 09:16:22] [Rank 0] PRINT: Model returns: +[2025-07-05 09:16:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:16:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:16:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:16:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:16:22] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:16:22] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:16:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:16:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:16:22] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:16:22] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:16:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:16:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:16:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:16:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:16:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:16:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:17:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:17:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:17:27] [Rank 0] PRINT: Starting training... +[2025-07-05 09:17:27] [Rank 0] PRINT: Starting training... +[2025-07-05 09:17:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:17:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:17:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:17:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:17:36] [Rank 0] step:21/10000 train_time:1749ms step_avg:83.29ms +[2025-07-05 09:17:36] [Rank 0] step:21/10000 train_time:1749ms step_avg:83.29ms +[2025-07-05 09:17:38] [Rank 0] step:41/10000 train_time:3202ms step_avg:78.10ms +[2025-07-05 09:17:38] [Rank 0] step:41/10000 train_time:3202ms step_avg:78.10ms +[2025-07-05 09:17:39] [Rank 0] step:61/10000 train_time:4652ms step_avg:76.27ms +[2025-07-05 09:17:39] [Rank 0] step:61/10000 train_time:4652ms step_avg:76.27ms +[2025-07-05 09:17:40] [Rank 0] step:81/10000 train_time:6105ms step_avg:75.36ms +[2025-07-05 09:17:40] [Rank 0] step:81/10000 train_time:6105ms step_avg:75.36ms +[2025-07-05 09:17:43] [Rank 0] step:101/10000 train_time:8213ms step_avg:81.31ms +[2025-07-05 09:17:43] [Rank 0] step:101/10000 train_time:8213ms step_avg:81.31ms +[2025-07-05 09:17:44] [Rank 0] step:121/10000 train_time:9666ms step_avg:79.88ms +[2025-07-05 09:17:44] [Rank 0] step:121/10000 train_time:9666ms step_avg:79.88ms +[2025-07-05 09:17:45] [Rank 0] step:141/10000 train_time:11119ms step_avg:78.86ms +[2025-07-05 09:17:45] [Rank 0] step:141/10000 train_time:11119ms step_avg:78.86ms +[2025-07-05 09:17:47] [Rank 0] step:161/10000 train_time:12574ms step_avg:78.10ms +[2025-07-05 09:17:47] [Rank 0] step:161/10000 train_time:12574ms step_avg:78.10ms +[2025-07-05 09:17:49] [Rank 0] step:181/10000 train_time:14288ms step_avg:78.94ms +[2025-07-05 09:17:49] [Rank 0] step:181/10000 train_time:14288ms step_avg:78.94ms +[2025-07-05 09:17:50] [Rank 0] step:201/10000 train_time:16141ms step_avg:80.30ms +[2025-07-05 09:17:50] [Rank 0] step:201/10000 train_time:16141ms step_avg:80.30ms +[2025-07-05 09:17:52] [Rank 0] step:221/10000 train_time:17599ms step_avg:79.63ms +[2025-07-05 09:17:52] [Rank 0] step:221/10000 train_time:17599ms step_avg:79.63ms +[2025-07-05 09:17:53] [Rank 0] step:241/10000 train_time:19060ms step_avg:79.09ms +[2025-07-05 09:17:53] [Rank 0] step:241/10000 train_time:19060ms step_avg:79.09ms +[2025-07-05 09:17:55] [Rank 0] step:261/10000 train_time:20525ms step_avg:78.64ms +[2025-07-05 09:17:55] [Rank 0] step:261/10000 train_time:20525ms step_avg:78.64ms +[2025-07-05 09:17:57] [Rank 0] step:281/10000 train_time:22644ms step_avg:80.58ms +[2025-07-05 09:17:57] [Rank 0] step:281/10000 train_time:22644ms step_avg:80.58ms +[2025-07-05 09:17:58] [Rank 0] step:301/10000 train_time:24106ms step_avg:80.09ms +[2025-07-05 09:17:58] [Rank 0] step:301/10000 train_time:24106ms step_avg:80.09ms +[2025-07-05 09:18:00] [Rank 0] step:321/10000 train_time:25572ms step_avg:79.66ms +[2025-07-05 09:18:00] [Rank 0] step:321/10000 train_time:25572ms step_avg:79.66ms +[2025-07-05 09:18:01] [Rank 0] step:341/10000 train_time:27037ms step_avg:79.29ms +[2025-07-05 09:18:01] [Rank 0] step:341/10000 train_time:27037ms step_avg:79.29ms +[2025-07-05 09:18:03] [Rank 0] step:361/10000 train_time:28551ms step_avg:79.09ms +[2025-07-05 09:18:03] [Rank 0] step:361/10000 train_time:28551ms step_avg:79.09ms +[2025-07-05 09:18:05] [Rank 0] step:381/10000 train_time:30202ms step_avg:79.27ms +[2025-07-05 09:18:05] [Rank 0] step:381/10000 train_time:30202ms step_avg:79.27ms +[2025-07-05 09:18:06] [Rank 0] step:401/10000 train_time:31930ms step_avg:79.63ms +[2025-07-05 09:18:06] [Rank 0] step:401/10000 train_time:31930ms step_avg:79.63ms +[2025-07-05 09:18:08] [Rank 0] step:421/10000 train_time:33396ms step_avg:79.33ms 
+[2025-07-05 09:18:08] [Rank 0] step:421/10000 train_time:33396ms step_avg:79.33ms +[2025-07-05 09:18:09] [Rank 0] step:441/10000 train_time:34862ms step_avg:79.05ms +[2025-07-05 09:18:09] [Rank 0] step:441/10000 train_time:34862ms step_avg:79.05ms +[2025-07-05 09:18:11] [Rank 0] step:461/10000 train_time:36983ms step_avg:80.22ms +[2025-07-05 09:18:11] [Rank 0] step:461/10000 train_time:36983ms step_avg:80.22ms +[2025-07-05 09:18:13] [Rank 0] step:481/10000 train_time:38451ms step_avg:79.94ms +[2025-07-05 09:18:13] [Rank 0] step:481/10000 train_time:38451ms step_avg:79.94ms +[2025-07-05 09:18:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:18:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:18:15] [Rank 0] PRINT: step:500/10000 train_loss:6.8666 val_loss:4.3905 train_time:39920ms step_avg:79.84ms +[2025-07-05 09:18:15] [Rank 0] PRINT: step:500/10000 train_loss:6.8666 val_loss:4.3905 train_time:39920ms step_avg:79.84ms +[2025-07-05 09:18:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:18:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e4e48a3d82c15dd2a6342b729dded96fd07c4721 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "d95e5ba4-42d8-44c3-bec9-ed7ef396c028", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50/training_log_d95e5ba4-42d8-44c3-bec9-ed7ef396c028.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50/training_log_d95e5ba4-42d8-44c3-bec9-ed7ef396c028.txt new file mode 100644 index 0000000000000000000000000000000000000000..b260e51af8c7b3a1ddef5251c53e6c623a6bb87e --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50/training_log_d95e5ba4-42d8-44c3-bec9-ed7ef396c028.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:43:40] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:43:40 2025 --- +[2025-07-05 09:43:40] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:43:40 2025 --- +[2025-07-05 09:43:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:43:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:43:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:43:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:43:41] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 
09:43:41] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:43:41] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50 +[2025-07-05 09:43:41] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_50 +[2025-07-05 09:43:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
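+# Worked example (sketch, not used by the run): get_lr() above implements a
+# "stable then decay" schedule, i.e. the multiplier stays at 1.0 for the first
+# (1 - cooldown_frac) of training and then decays linearly down to 0.1. With the
+# Hyperparameters defaults (num_iterations=10000, cooldown_frac=0.8) that means
+# full LR up to step 2000, about 0.55x at step 6000, and 0.1x at the final step.
+def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped training progress, as in get_lr()
+    if x < 1 - cooldown_frac:
+        return 1.0
+    w = (1 - x) / max(cooldown_frac, 1e-9)
+    return w * 1.0 + (1 - w) * 0.1
+# e.g. _lr_multiplier_sketch(0) -> 1.0, _lr_multiplier_sketch(6000) -> ~0.55, _lr_multiplier_sketch(10000) -> 0.1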
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:43:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
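+# Layout note (sketch inferred from the f-string above): the per-run folder name
+# encodes the CLI flags, so each configuration gets its own directory under
+# logs_bios/qa_0704. For example, with the argparse defaults (optimizer_mode=0,
+# model_parameterization="whole", adam_lr=1e-3, seed=42) it would be:
+_example_run_folder = f"mode_{0}_param_{'whole'}_lr_{1e-3}_seed_{42}"  # == "mode_0_param_whole_lr_0.001_seed_42"; illustrative only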
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
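+# Worked example (sketch, not called anywhere): get_window_size_blocks() above
+# widens the sliding attention window roughly linearly over training, rounded up
+# to a multiple of 128 tokens. With num_iterations=10000 the window is 128 tokens
+# (1 block) at step 0, 896 tokens (7 blocks) at the midpoint, and 1792 tokens
+# (14 blocks, the first multiple of 128 above 1728) at the final step.
+import math  # math is already imported near the top of this script
+def _window_tokens_sketch(step: int, num_iterations: int = 10000) -> int:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped training progress
+    return max(128, 128 * math.ceil(1728 * x / 128))  # the value that get_window_size_blocks() divides by 128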
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:43:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:43:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:43:41] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:43:41] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:43:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:43:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:43:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:43:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:43:43] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:43:43] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:43:44] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:43:44] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:43:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:43:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:43:44] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:43:44] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:43:44] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:43:44] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:43:44] [Rank 0] PRINT: Model returns: +[2025-07-05 09:43:44] [Rank 0] PRINT: Model returns: +[2025-07-05 09:43:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:43:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:43:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:43:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:43:44] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:43:44] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:43:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:43:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:43:44] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:43:44] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:43:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:43:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:43:44] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:43:44] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:43:44] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:43:44] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:44:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:44:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:44:49] [Rank 0] PRINT: Starting training... +[2025-07-05 09:44:49] [Rank 0] PRINT: Starting training... +[2025-07-05 09:44:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:44:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
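+# --- Editorial note (not emitted by the script): why the warning above fires.
+# With this run's config, val_batch_size = world_size * val_seq_len = 4 * 65536 =
+# 262144 (implying world_size = 4 here), and val_tokens = 1966080 is not a multiple
+# of it, so the floor division in the validation loop drops the remainder:
+#   val_num_steps  = 1966080 // 262144 = 7
+#   skipped_tokens = 1966080 - 7 * 262144 = 131072 per validation pass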
+[2025-07-05 09:44:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:44:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:44:58] [Rank 0] step:21/10000 train_time:1548ms step_avg:73.73ms +[2025-07-05 09:44:58] [Rank 0] step:21/10000 train_time:1548ms step_avg:73.73ms +[2025-07-05 09:44:59] [Rank 0] step:41/10000 train_time:3003ms step_avg:73.23ms +[2025-07-05 09:44:59] [Rank 0] step:41/10000 train_time:3003ms step_avg:73.23ms +[2025-07-05 09:45:01] [Rank 0] step:61/10000 train_time:4456ms step_avg:73.05ms +[2025-07-05 09:45:01] [Rank 0] step:61/10000 train_time:4456ms step_avg:73.05ms +[2025-07-05 09:45:02] [Rank 0] step:81/10000 train_time:5912ms step_avg:72.99ms +[2025-07-05 09:45:02] [Rank 0] step:81/10000 train_time:5912ms step_avg:72.99ms +[2025-07-05 09:45:04] [Rank 0] step:101/10000 train_time:8027ms step_avg:79.47ms +[2025-07-05 09:45:04] [Rank 0] step:101/10000 train_time:8027ms step_avg:79.47ms +[2025-07-05 09:45:06] [Rank 0] step:121/10000 train_time:9484ms step_avg:78.38ms +[2025-07-05 09:45:06] [Rank 0] step:121/10000 train_time:9484ms step_avg:78.38ms +[2025-07-05 09:45:07] [Rank 0] step:141/10000 train_time:10941ms step_avg:77.59ms +[2025-07-05 09:45:07] [Rank 0] step:141/10000 train_time:10941ms step_avg:77.59ms +[2025-07-05 09:45:09] [Rank 0] step:161/10000 train_time:12401ms step_avg:77.03ms +[2025-07-05 09:45:09] [Rank 0] step:161/10000 train_time:12401ms step_avg:77.03ms +[2025-07-05 09:45:11] [Rank 0] step:181/10000 train_time:13916ms step_avg:76.88ms +[2025-07-05 09:45:11] [Rank 0] step:181/10000 train_time:13916ms step_avg:76.88ms +[2025-07-05 09:45:12] [Rank 0] step:201/10000 train_time:15991ms step_avg:79.56ms +[2025-07-05 09:45:12] [Rank 0] step:201/10000 train_time:15991ms step_avg:79.56ms +[2025-07-05 09:45:14] [Rank 0] step:221/10000 train_time:17451ms step_avg:78.96ms +[2025-07-05 09:45:14] [Rank 0] step:221/10000 train_time:17451ms step_avg:78.96ms +[2025-07-05 09:45:15] [Rank 0] step:241/10000 train_time:18918ms step_avg:78.50ms +[2025-07-05 09:45:15] [Rank 0] step:241/10000 train_time:18918ms step_avg:78.50ms +[2025-07-05 09:45:17] [Rank 0] step:261/10000 train_time:20385ms step_avg:78.10ms +[2025-07-05 09:45:17] [Rank 0] step:261/10000 train_time:20385ms step_avg:78.10ms +[2025-07-05 09:45:19] [Rank 0] step:281/10000 train_time:22523ms step_avg:80.15ms +[2025-07-05 09:45:19] [Rank 0] step:281/10000 train_time:22523ms step_avg:80.15ms +[2025-07-05 09:45:20] [Rank 0] step:301/10000 train_time:23989ms step_avg:79.70ms +[2025-07-05 09:45:20] [Rank 0] step:301/10000 train_time:23989ms step_avg:79.70ms +[2025-07-05 09:45:22] [Rank 0] step:321/10000 train_time:25457ms step_avg:79.30ms +[2025-07-05 09:45:22] [Rank 0] step:321/10000 train_time:25457ms step_avg:79.30ms +[2025-07-05 09:45:23] [Rank 0] step:341/10000 train_time:26927ms step_avg:78.96ms +[2025-07-05 09:45:23] [Rank 0] step:341/10000 train_time:26927ms step_avg:78.96ms +[2025-07-05 09:45:25] [Rank 0] step:361/10000 train_time:28396ms step_avg:78.66ms +[2025-07-05 09:45:25] [Rank 0] step:361/10000 train_time:28396ms step_avg:78.66ms +[2025-07-05 09:45:27] [Rank 0] step:381/10000 train_time:30524ms step_avg:80.12ms +[2025-07-05 09:45:27] [Rank 0] step:381/10000 train_time:30524ms step_avg:80.12ms +[2025-07-05 09:45:28] [Rank 0] step:401/10000 train_time:31993ms step_avg:79.78ms +[2025-07-05 09:45:28] [Rank 0] step:401/10000 train_time:31993ms step_avg:79.78ms +[2025-07-05 09:45:30] [Rank 0] step:421/10000 train_time:33776ms step_avg:80.23ms 
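+# --- Editorial note (not emitted by the script): the step_avg field in the timing
+# lines above is cumulative train_time divided by the printed step index
+# (approx_total_training_time_ms / max(1, step + 1) in the training loop), e.g.
+# 33776 ms / 421 steps ≈ 80.23 ms for the last line shown.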
+[2025-07-05 09:45:30] [Rank 0] step:421/10000 train_time:33776ms step_avg:80.23ms +[2025-07-05 09:45:32] [Rank 0] step:441/10000 train_time:35245ms step_avg:79.92ms +[2025-07-05 09:45:32] [Rank 0] step:441/10000 train_time:35245ms step_avg:79.92ms +[2025-07-05 09:45:34] [Rank 0] step:461/10000 train_time:37371ms step_avg:81.06ms +[2025-07-05 09:45:34] [Rank 0] step:461/10000 train_time:37371ms step_avg:81.06ms +[2025-07-05 09:45:35] [Rank 0] step:481/10000 train_time:38844ms step_avg:80.76ms +[2025-07-05 09:45:35] [Rank 0] step:481/10000 train_time:38844ms step_avg:80.76ms +[2025-07-05 09:45:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:45:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:45:37] [Rank 0] PRINT: step:500/10000 train_loss:6.8652 val_loss:4.3924 train_time:40314ms step_avg:80.63ms +[2025-07-05 09:45:37] [Rank 0] PRINT: step:500/10000 train_loss:6.8652 val_loss:4.3924 train_time:40314ms step_avg:80.63ms +[2025-07-05 09:45:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:45:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f361cd308be725133c7e5ef76d397efaac92185f --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "7c7d8be1-703d-4e14-a1d5-035b20ae8f28", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51/training_log_7c7d8be1-703d-4e14-a1d5-035b20ae8f28.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51/training_log_7c7d8be1-703d-4e14-a1d5-035b20ae8f28.txt new file mode 100644 index 0000000000000000000000000000000000000000..2988cb15624931c6925fe5cc70fcec3fe3521851 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51/training_log_7c7d8be1-703d-4e14-a1d5-035b20ae8f28.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:11:04] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:11:04 2025 --- +[2025-07-05 10:11:04] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:11:04 2025 --- +[2025-07-05 10:11:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:11:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:11:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:11:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:11:04] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 
10:11:04] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:11:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51 +[2025-07-05 10:11:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_51 +[2025-07-05 10:11:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
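+# Illustrative values (a quick sanity check, not logged output from this run):
+# with the configured num_iterations=10000 and cooldown_frac=0.8, the
+# stable-then-decay schedule in get_lr() above gives roughly
+#     get_lr(0)     -> 1.0   (stable phase)
+#     get_lr(2000)  -> 1.0   (end of the stable phase, x = 0.2)
+#     get_lr(6000)  -> 0.55  (halfway through the cooldown)
+#     get_lr(10000) -> 0.1   (decay floor)
+# and each optimizer group's lr is this multiplier times its stored "initial_lr".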
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
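+# ---------------------------------------------------------------------------
+# Illustrative sketch only (not part of the logged script above): the training
+# loop pairs a per-group "initial_lr" with a shared schedule multiplier, warms
+# the Muon momentum from 0.85 to 0.95 over the first 300 steps, and steps every
+# optimizer on the same averaged gradients.  torch.optim.SGD stands in for the
+# repo's Muon here purely so the sketch runs without optimizers/MUON; the model,
+# loss and step count are toy placeholders.
+import torch
+
+toy_model = torch.nn.Linear(8, 8)
+adam_like = torch.optim.Adam([toy_model.bias], lr=1e-4)                   # embeds/head/scalars in the real run
+muon_like = torch.optim.SGD([toy_model.weight], lr=0.01, momentum=0.85)   # hidden matrices in the real run
+toy_optimizers = [adam_like, muon_like]
+for opt in toy_optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+def toy_lr_mult(step, total=10000, cooldown_frac=0.8):
+    # stable for the first (1 - cooldown_frac) of training, then linear decay to 0.1
+    x = min(max(step / total, 0.0), 1.0)
+    if x < 1 - cooldown_frac:
+        return 1.0
+    w = (1 - x) / cooldown_frac
+    return w * 1.0 + (1 - w) * 0.1
+
+for step in range(5):
+    loss = toy_model(torch.randn(4, 8)).square().mean()
+    loss.backward()
+    for opt in toy_optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * toy_lr_mult(step)
+    frac = min(step / 300, 1)
+    for group in muon_like.param_groups:
+        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+    for opt in toy_optimizers:
+        opt.step()
+    toy_model.zero_grad(set_to_none=True)
+# ---------------------------------------------------------------------------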
+[2025-07-05 10:11:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-05 10:11:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:11:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:11:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:11:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:11:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:11:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:11:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:11:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:11:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:11:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:11:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:11:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:11:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:11:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:11:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:11:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:11:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:11:07] [Rank 0] PRINT: Model returns: +[2025-07-05 10:11:07] [Rank 0] PRINT: Model returns: +[2025-07-05 10:11:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:11:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:11:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:11:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:11:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:11:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:11:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:11:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:11:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:11:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:11:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:11:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:11:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:11:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:11:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:11:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:12:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:12:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:12:12] [Rank 0] PRINT: Starting training... +[2025-07-05 10:12:12] [Rank 0] PRINT: Starting training... +[2025-07-05 10:12:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:12:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
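A note on the arithmetic behind the warning above (not part of the original log): with the logged settings, val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144, and val_tokens / val_batch_size = 1966080 / 262144 = 7.5, so each evaluation runs floor(7.5) = 7 full validation batches (1,835,008 tokens) and skips the remaining 131,072 tokens.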
+[2025-07-05 10:12:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:12:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:12:21] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.58ms +[2025-07-05 10:12:21] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.58ms +[2025-07-05 10:12:22] [Rank 0] step:41/10000 train_time:3210ms step_avg:78.30ms +[2025-07-05 10:12:22] [Rank 0] step:41/10000 train_time:3210ms step_avg:78.30ms +[2025-07-05 10:12:24] [Rank 0] step:61/10000 train_time:4665ms step_avg:76.47ms +[2025-07-05 10:12:24] [Rank 0] step:61/10000 train_time:4665ms step_avg:76.47ms +[2025-07-05 10:12:25] [Rank 0] step:81/10000 train_time:6120ms step_avg:75.56ms +[2025-07-05 10:12:25] [Rank 0] step:81/10000 train_time:6120ms step_avg:75.56ms +[2025-07-05 10:12:28] [Rank 0] step:101/10000 train_time:8224ms step_avg:81.42ms +[2025-07-05 10:12:28] [Rank 0] step:101/10000 train_time:8224ms step_avg:81.42ms +[2025-07-05 10:12:29] [Rank 0] step:121/10000 train_time:9682ms step_avg:80.02ms +[2025-07-05 10:12:29] [Rank 0] step:121/10000 train_time:9682ms step_avg:80.02ms +[2025-07-05 10:12:30] [Rank 0] step:141/10000 train_time:11140ms step_avg:79.00ms +[2025-07-05 10:12:30] [Rank 0] step:141/10000 train_time:11140ms step_avg:79.00ms +[2025-07-05 10:12:32] [Rank 0] step:161/10000 train_time:12607ms step_avg:78.30ms +[2025-07-05 10:12:32] [Rank 0] step:161/10000 train_time:12607ms step_avg:78.30ms +[2025-07-05 10:12:34] [Rank 0] step:181/10000 train_time:14746ms step_avg:81.47ms +[2025-07-05 10:12:34] [Rank 0] step:181/10000 train_time:14746ms step_avg:81.47ms +[2025-07-05 10:12:35] [Rank 0] step:201/10000 train_time:16187ms step_avg:80.53ms +[2025-07-05 10:12:35] [Rank 0] step:201/10000 train_time:16187ms step_avg:80.53ms +[2025-07-05 10:12:37] [Rank 0] step:221/10000 train_time:17646ms step_avg:79.85ms +[2025-07-05 10:12:37] [Rank 0] step:221/10000 train_time:17646ms step_avg:79.85ms +[2025-07-05 10:12:38] [Rank 0] step:241/10000 train_time:19110ms step_avg:79.30ms +[2025-07-05 10:12:38] [Rank 0] step:241/10000 train_time:19110ms step_avg:79.30ms +[2025-07-05 10:12:40] [Rank 0] step:261/10000 train_time:20576ms step_avg:78.83ms +[2025-07-05 10:12:40] [Rank 0] step:261/10000 train_time:20576ms step_avg:78.83ms +[2025-07-05 10:12:42] [Rank 0] step:281/10000 train_time:22689ms step_avg:80.74ms +[2025-07-05 10:12:42] [Rank 0] step:281/10000 train_time:22689ms step_avg:80.74ms +[2025-07-05 10:12:43] [Rank 0] step:301/10000 train_time:24154ms step_avg:80.25ms +[2025-07-05 10:12:43] [Rank 0] step:301/10000 train_time:24154ms step_avg:80.25ms +[2025-07-05 10:12:45] [Rank 0] step:321/10000 train_time:25620ms step_avg:79.81ms +[2025-07-05 10:12:45] [Rank 0] step:321/10000 train_time:25620ms step_avg:79.81ms +[2025-07-05 10:12:46] [Rank 0] step:341/10000 train_time:27092ms step_avg:79.45ms +[2025-07-05 10:12:46] [Rank 0] step:341/10000 train_time:27092ms step_avg:79.45ms +[2025-07-05 10:12:49] [Rank 0] step:361/10000 train_time:28559ms step_avg:79.11ms +[2025-07-05 10:12:49] [Rank 0] step:361/10000 train_time:28559ms step_avg:79.11ms +[2025-07-05 10:12:50] [Rank 0] step:381/10000 train_time:30692ms step_avg:80.56ms +[2025-07-05 10:12:50] [Rank 0] step:381/10000 train_time:30692ms step_avg:80.56ms +[2025-07-05 10:12:51] [Rank 0] step:401/10000 train_time:32161ms step_avg:80.20ms +[2025-07-05 10:12:51] [Rank 0] step:401/10000 train_time:32161ms step_avg:80.20ms +[2025-07-05 10:12:53] [Rank 0] step:421/10000 train_time:33635ms step_avg:79.89ms 
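A minimal sketch, not part of the training script or its log output, showing how the step_avg figures in the timing lines above can be recomputed; the helper name and regex below are illustrative assumptions:

import re

def parse_step_timing(line: str):
    # Matches lines like: "step:421/10000 train_time:33635ms step_avg:79.89ms"
    m = re.search(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms", line)
    if m is None:
        return None
    step, train_time_ms, step_avg_ms = int(m.group(1)), int(m.group(2)), float(m.group(3))
    # step_avg is cumulative train_time divided by the step index,
    # e.g. 33635 ms / 421 steps = 79.89 ms per step.
    return step, train_time_ms, step_avg_ms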
+[2025-07-05 10:12:53] [Rank 0] step:421/10000 train_time:33635ms step_avg:79.89ms +[2025-07-05 10:12:55] [Rank 0] step:441/10000 train_time:35200ms step_avg:79.82ms +[2025-07-05 10:12:55] [Rank 0] step:441/10000 train_time:35200ms step_avg:79.82ms +[2025-07-05 10:12:56] [Rank 0] step:461/10000 train_time:37150ms step_avg:80.59ms +[2025-07-05 10:12:56] [Rank 0] step:461/10000 train_time:37150ms step_avg:80.59ms +[2025-07-05 10:12:58] [Rank 0] step:481/10000 train_time:38617ms step_avg:80.29ms +[2025-07-05 10:12:58] [Rank 0] step:481/10000 train_time:38617ms step_avg:80.29ms +[2025-07-05 10:12:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:12:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:13:00] [Rank 0] PRINT: step:500/10000 train_loss:6.8658 val_loss:4.3922 train_time:40091ms step_avg:80.18ms +[2025-07-05 10:13:00] [Rank 0] PRINT: step:500/10000 train_loss:6.8658 val_loss:4.3922 train_time:40091ms step_avg:80.18ms +[2025-07-05 10:13:00] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:13:00] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..cc2c325e2524a1e656e60bc01097321dc39d5dd7 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "20f96d83-36d8-4a61-a86d-540fdbc9d2db", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/training_log_20f96d83-36d8-4a61-a86d-540fdbc9d2db.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/training_log_20f96d83-36d8-4a61-a86d-540fdbc9d2db.txt new file mode 100644 index 0000000000000000000000000000000000000000..f038296662f5917fc3200b3fb350f6eb58a65341 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/training_log_20f96d83-36d8-4a61-a86d-540fdbc9d2db.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:09:25] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:09:25 2025 --- +[2025-07-05 08:09:25] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:09:25 2025 --- +[2025-07-05 08:09:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:09:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:09:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:09:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:09:25] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:09:25] 
[Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:09:25] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42 +[2025-07-05 08:09:25] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42 +[2025-07-05 08:09:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
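# --- Illustrative note, not part of the original logged script ---
# Worked values for the attention window schedule defined earlier in this
# script (next_multiple_of_n / get_window_size_blocks), assuming the logged
# num_iterations=10000. The sliding window grows linearly with training
# progress, rounded up to a multiple of 128 tokens:
#   step     0 -> 1728*0.00 =    0 -> window  128 tokens ->  1 block
#   step  2500 -> 1728*0.25 =  432 -> window  512 tokens ->  4 blocks
#   step  5000 -> 1728*0.50 =  864 -> window  896 tokens ->  7 blocks
#   step 10000 -> 1728*1.00 = 1728 -> window 1792 tokens -> 14 blocks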
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+            break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-05 08:09:25] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:09:25] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:09:25] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:09:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:09:27] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:09:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:09:27] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:09:27] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:09:27] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:09:28] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:09:28] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:09:28] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:09:28] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:09:28] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:09:28] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:09:28] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:09:28] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:09:28] [Rank 0] PRINT: Model returns: +[2025-07-05 08:09:28] [Rank 0] PRINT: Model returns: +[2025-07-05 08:09:28] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:09:28] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:09:28] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:09:28] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:09:28] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:09:28] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:09:28] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:09:28] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:09:28] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:09:28] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:09:28] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:09:28] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:09:28] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:09:28] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:09:28] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:09:28] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:11:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:11:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:11:09] [Rank 0] PRINT: Starting training... +[2025-07-05 08:11:09] [Rank 0] PRINT: Starting training... +[2025-07-05 08:11:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:11:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
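The divisibility warning above follows directly from the configured sizes: val_seq_len = 65536 together with the logged val_batch_size = 262144 implies a 4-rank run, and 1966080 / 262144 = 7.5, so floor division yields 7 validation steps and the final 131072 tokens of the validation budget are skipped. A quick check with the values from the config and the warning:

val_tokens, val_seq_len, val_batch_size = 1_966_080, 65_536, 262_144
world_size = val_batch_size // val_seq_len             # 4
val_num_steps = val_tokens // val_batch_size           # 7 full validation steps
missed = val_tokens - val_num_steps * val_batch_size   # 131072 tokens not evaluated
print(world_size, val_num_steps, missed)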
+[2025-07-05 08:11:17] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:11:17] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:11:19] [Rank 0] step:21/10000 train_time:1650ms step_avg:78.58ms +[2025-07-05 08:11:19] [Rank 0] step:21/10000 train_time:1650ms step_avg:78.58ms +[2025-07-05 08:11:20] [Rank 0] step:41/10000 train_time:3109ms step_avg:75.82ms +[2025-07-05 08:11:20] [Rank 0] step:41/10000 train_time:3109ms step_avg:75.82ms +[2025-07-05 08:11:22] [Rank 0] step:61/10000 train_time:4565ms step_avg:74.83ms +[2025-07-05 08:11:22] [Rank 0] step:61/10000 train_time:4565ms step_avg:74.83ms +[2025-07-05 08:11:23] [Rank 0] step:81/10000 train_time:6024ms step_avg:74.38ms +[2025-07-05 08:11:23] [Rank 0] step:81/10000 train_time:6024ms step_avg:74.38ms +[2025-07-05 08:11:25] [Rank 0] step:101/10000 train_time:8146ms step_avg:80.65ms +[2025-07-05 08:11:25] [Rank 0] step:101/10000 train_time:8146ms step_avg:80.65ms +[2025-07-05 08:11:27] [Rank 0] step:121/10000 train_time:9708ms step_avg:80.24ms +[2025-07-05 08:11:27] [Rank 0] step:121/10000 train_time:9708ms step_avg:80.24ms +[2025-07-05 08:11:28] [Rank 0] step:141/10000 train_time:11170ms step_avg:79.22ms +[2025-07-05 08:11:28] [Rank 0] step:141/10000 train_time:11170ms step_avg:79.22ms +[2025-07-05 08:11:30] [Rank 0] step:161/10000 train_time:12734ms step_avg:79.09ms +[2025-07-05 08:11:30] [Rank 0] step:161/10000 train_time:12734ms step_avg:79.09ms +[2025-07-05 08:11:32] [Rank 0] step:181/10000 train_time:14455ms step_avg:79.86ms +[2025-07-05 08:11:32] [Rank 0] step:181/10000 train_time:14455ms step_avg:79.86ms +[2025-07-05 08:11:34] [Rank 0] step:201/10000 train_time:16304ms step_avg:81.12ms +[2025-07-05 08:11:34] [Rank 0] step:201/10000 train_time:16304ms step_avg:81.12ms +[2025-07-05 08:11:35] [Rank 0] step:221/10000 train_time:17772ms step_avg:80.41ms +[2025-07-05 08:11:35] [Rank 0] step:221/10000 train_time:17772ms step_avg:80.41ms +[2025-07-05 08:11:37] [Rank 0] step:241/10000 train_time:19343ms step_avg:80.26ms +[2025-07-05 08:11:37] [Rank 0] step:241/10000 train_time:19343ms step_avg:80.26ms +[2025-07-05 08:11:38] [Rank 0] step:261/10000 train_time:20815ms step_avg:79.75ms +[2025-07-05 08:11:38] [Rank 0] step:261/10000 train_time:20815ms step_avg:79.75ms +[2025-07-05 08:11:40] [Rank 0] step:281/10000 train_time:22314ms step_avg:79.41ms +[2025-07-05 08:11:40] [Rank 0] step:281/10000 train_time:22314ms step_avg:79.41ms +[2025-07-05 08:11:41] [Rank 0] step:301/10000 train_time:23786ms step_avg:79.02ms +[2025-07-05 08:11:41] [Rank 0] step:301/10000 train_time:23786ms step_avg:79.02ms +[2025-07-05 08:11:42] [Rank 0] step:321/10000 train_time:25256ms step_avg:78.68ms +[2025-07-05 08:11:42] [Rank 0] step:321/10000 train_time:25256ms step_avg:78.68ms +[2025-07-05 08:11:44] [Rank 0] step:341/10000 train_time:26728ms step_avg:78.38ms +[2025-07-05 08:11:44] [Rank 0] step:341/10000 train_time:26728ms step_avg:78.38ms +[2025-07-05 08:11:46] [Rank 0] step:361/10000 train_time:28199ms step_avg:78.11ms +[2025-07-05 08:11:46] [Rank 0] step:361/10000 train_time:28199ms step_avg:78.11ms +[2025-07-05 08:11:47] [Rank 0] step:381/10000 train_time:29907ms step_avg:78.50ms +[2025-07-05 08:11:47] [Rank 0] step:381/10000 train_time:29907ms step_avg:78.50ms +[2025-07-05 08:11:49] [Rank 0] step:401/10000 train_time:31377ms step_avg:78.25ms +[2025-07-05 08:11:49] [Rank 0] step:401/10000 train_time:31377ms step_avg:78.25ms +[2025-07-05 08:11:50] [Rank 0] step:421/10000 train_time:32847ms step_avg:78.02ms 
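As a sanity check on these progress lines, step_avg is consistent with the cumulative train_time divided by the printed step index, e.g. for the step:421 line above:

train_time_ms, printed_step = 32847, 421        # from the step:421 progress line
print(round(train_time_ms / printed_step, 2))   # 78.02 ms, matching the logged step_avg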
+[2025-07-05 08:11:50] [Rank 0] step:421/10000 train_time:32847ms step_avg:78.02ms +[2025-07-05 08:11:52] [Rank 0] step:441/10000 train_time:34553ms step_avg:78.35ms +[2025-07-05 08:11:52] [Rank 0] step:441/10000 train_time:34553ms step_avg:78.35ms +[2025-07-05 08:11:54] [Rank 0] step:461/10000 train_time:36318ms step_avg:78.78ms +[2025-07-05 08:11:54] [Rank 0] step:461/10000 train_time:36318ms step_avg:78.78ms +[2025-07-05 08:11:55] [Rank 0] step:481/10000 train_time:37844ms step_avg:78.68ms +[2025-07-05 08:11:55] [Rank 0] step:481/10000 train_time:37844ms step_avg:78.68ms +[2025-07-05 08:11:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:11:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:11:57] [Rank 0] PRINT: step:500/10000 train_loss:4.9403 val_loss:2.0531 train_time:39311ms step_avg:78.62ms +[2025-07-05 08:11:57] [Rank 0] PRINT: step:500/10000 train_loss:4.9403 val_loss:2.0531 train_time:39311ms step_avg:78.62ms +[2025-07-05 08:11:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:11:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..20d5e03fab704e976303c0f15fe7d15c48ecc533 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "0e074af1-77d0-4a00-8343-c827b3cb14ae", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/training_log_0e074af1-77d0-4a00-8343-c827b3cb14ae.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/training_log_0e074af1-77d0-4a00-8343-c827b3cb14ae.txt new file mode 100644 index 0000000000000000000000000000000000000000..3f8b81d0ff92b570adcaafe294694d50835c6d76 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/training_log_0e074af1-77d0-4a00-8343-c827b3cb14ae.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:37:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:37:45 2025 --- +[2025-07-05 08:37:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:37:45 2025 --- +[2025-07-05 08:37:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:37:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:37:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:37:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:37:45] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:37:45] 
[Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:37:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43 +[2025-07-05 08:37:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43 +[2025-07-05 08:37:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
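# Annotation (added for readability; not part of the original logged script):
# the detailed evaluation configured just below uses M_FOR_POWERLAW = 11. With
# generate_powerlaw_selection_counts(11) as defined above, group 0 holds a single
# class with 2**11 = 2048 samples and, for g >= 1, group g holds 2**(g-1) classes
# with 2**(11-g) samples each -- 2048 classes in total, with group 0 contributing
# 2048 samples and every other group 1024. Quick check (illustrative only):
#   counts, groups = generate_powerlaw_selection_counts(11)
#   assert len(counts) == 2048 and counts[0] == 2048 and groups.count(11) == 1024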
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:37:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
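+# [Editor's sketch - illustrative arithmetic, not part of the original script]
+# Rough token-budget math for the Hyperparameters above, assuming world_size=4
+# (consistent with the val_batch_size of 262144 reported later in this log):
+#   train tokens per step = world_size * train_seq_len = 4 * 12288 = 49152
+#   val batch size        = world_size * val_seq_len   = 4 * 65536 = 262144
+#   val steps per eval    = val_tokens // val_batch    = 1966080 // 262144 = 7
+# The 0.5-batch remainder (131072 tokens) is what triggers the divisibility warning
+# printed at validation time.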
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
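+# [Editor's sketch - illustrative values, not emitted by the original run] Example
+# outputs of the attention window schedule defined in get_window_size_blocks() above;
+# the window grows with training progress x = step / num_iterations, in multiples of
+# 128 tokens:
+#   x = 0.0 -> max(128, 128)                = 128 tokens  ->  1 block
+#   x = 0.5 -> next multiple of 128 >= 864  = 896 tokens  ->  7 blocks
+#   x = 1.0 -> next multiple of 128 >= 1728 = 1792 tokens -> 14 blocks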
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:37:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:37:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:37:46] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:37:46] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:37:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:37:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:37:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:37:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:37:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:37:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:37:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:37:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:37:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:37:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:37:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:37:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:37:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:37:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:37:49] [Rank 0] PRINT: Model returns: +[2025-07-05 08:37:49] [Rank 0] PRINT: Model returns: +[2025-07-05 08:37:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:37:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:37:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:37:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:37:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:37:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:37:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:37:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:37:49] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:37:49] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:37:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:37:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:37:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:37:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:37:49] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:37:49] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:38:55] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:38:55] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:38:55] [Rank 0] PRINT: Starting training... +[2025-07-05 08:38:55] [Rank 0] PRINT: Starting training... +[2025-07-05 08:38:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:38:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:39:02] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:39:02] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:39:03] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.63ms +[2025-07-05 08:39:03] [Rank 0] step:21/10000 train_time:1546ms step_avg:73.63ms +[2025-07-05 08:39:05] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.18ms +[2025-07-05 08:39:05] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.18ms +[2025-07-05 08:39:06] [Rank 0] step:61/10000 train_time:4456ms step_avg:73.05ms +[2025-07-05 08:39:06] [Rank 0] step:61/10000 train_time:4456ms step_avg:73.05ms +[2025-07-05 08:39:08] [Rank 0] step:81/10000 train_time:5915ms step_avg:73.03ms +[2025-07-05 08:39:08] [Rank 0] step:81/10000 train_time:5915ms step_avg:73.03ms +[2025-07-05 08:39:10] [Rank 0] step:101/10000 train_time:8018ms step_avg:79.39ms +[2025-07-05 08:39:10] [Rank 0] step:101/10000 train_time:8018ms step_avg:79.39ms +[2025-07-05 08:39:11] [Rank 0] step:121/10000 train_time:9479ms step_avg:78.34ms +[2025-07-05 08:39:11] [Rank 0] step:121/10000 train_time:9479ms step_avg:78.34ms +[2025-07-05 08:39:13] [Rank 0] step:141/10000 train_time:10944ms step_avg:77.62ms +[2025-07-05 08:39:13] [Rank 0] step:141/10000 train_time:10944ms step_avg:77.62ms +[2025-07-05 08:39:14] [Rank 0] step:161/10000 train_time:12407ms step_avg:77.06ms +[2025-07-05 08:39:14] [Rank 0] step:161/10000 train_time:12407ms step_avg:77.06ms +[2025-07-05 08:39:16] [Rank 0] step:181/10000 train_time:14127ms step_avg:78.05ms +[2025-07-05 08:39:16] [Rank 0] step:181/10000 train_time:14127ms step_avg:78.05ms +[2025-07-05 08:39:18] [Rank 0] step:201/10000 train_time:16220ms step_avg:80.70ms +[2025-07-05 08:39:18] [Rank 0] step:201/10000 train_time:16220ms step_avg:80.70ms +[2025-07-05 08:39:20] [Rank 0] step:221/10000 train_time:17850ms step_avg:80.77ms +[2025-07-05 08:39:20] [Rank 0] step:221/10000 train_time:17850ms step_avg:80.77ms +[2025-07-05 08:39:21] [Rank 0] step:241/10000 train_time:19315ms step_avg:80.15ms +[2025-07-05 08:39:21] [Rank 0] step:241/10000 train_time:19315ms step_avg:80.15ms +[2025-07-05 08:39:23] [Rank 0] step:261/10000 train_time:20783ms step_avg:79.63ms +[2025-07-05 08:39:23] [Rank 0] step:261/10000 train_time:20783ms step_avg:79.63ms +[2025-07-05 08:39:25] [Rank 0] step:281/10000 train_time:22900ms step_avg:81.49ms +[2025-07-05 08:39:25] [Rank 0] step:281/10000 train_time:22900ms step_avg:81.49ms +[2025-07-05 08:39:26] [Rank 0] step:301/10000 train_time:24367ms step_avg:80.95ms +[2025-07-05 08:39:26] [Rank 0] step:301/10000 train_time:24367ms step_avg:80.95ms +[2025-07-05 08:39:28] [Rank 0] step:321/10000 train_time:25835ms step_avg:80.48ms +[2025-07-05 08:39:28] [Rank 0] step:321/10000 train_time:25835ms step_avg:80.48ms +[2025-07-05 08:39:29] [Rank 0] step:341/10000 train_time:27307ms step_avg:80.08ms +[2025-07-05 08:39:29] [Rank 0] step:341/10000 train_time:27307ms step_avg:80.08ms +[2025-07-05 08:39:31] [Rank 0] step:361/10000 train_time:28827ms step_avg:79.85ms +[2025-07-05 08:39:31] [Rank 0] step:361/10000 train_time:28827ms step_avg:79.85ms +[2025-07-05 08:39:33] [Rank 0] step:381/10000 train_time:30901ms step_avg:81.10ms +[2025-07-05 08:39:33] [Rank 0] step:381/10000 train_time:30901ms step_avg:81.10ms +[2025-07-05 08:39:34] [Rank 0] step:401/10000 train_time:32371ms step_avg:80.73ms +[2025-07-05 08:39:34] [Rank 0] step:401/10000 train_time:32371ms step_avg:80.73ms +[2025-07-05 08:39:36] [Rank 0] step:421/10000 train_time:33840ms step_avg:80.38ms 
+[2025-07-05 08:39:36] [Rank 0] step:421/10000 train_time:33840ms step_avg:80.38ms +[2025-07-05 08:39:37] [Rank 0] step:441/10000 train_time:35308ms step_avg:80.06ms +[2025-07-05 08:39:37] [Rank 0] step:441/10000 train_time:35308ms step_avg:80.06ms +[2025-07-05 08:39:39] [Rank 0] step:461/10000 train_time:37016ms step_avg:80.30ms +[2025-07-05 08:39:39] [Rank 0] step:461/10000 train_time:37016ms step_avg:80.30ms +[2025-07-05 08:39:40] [Rank 0] step:481/10000 train_time:38484ms step_avg:80.01ms +[2025-07-05 08:39:40] [Rank 0] step:481/10000 train_time:38484ms step_avg:80.01ms +[2025-07-05 08:39:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:39:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:39:43] [Rank 0] PRINT: step:500/10000 train_loss:4.9429 val_loss:2.0511 train_time:39953ms step_avg:79.91ms +[2025-07-05 08:39:43] [Rank 0] PRINT: step:500/10000 train_loss:4.9429 val_loss:2.0511 train_time:39953ms step_avg:79.91ms +[2025-07-05 08:39:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:39:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0200dcc8623666266d1bd4b6a67c969827f52321 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "c2fa12be-d7e0-46fa-82e6-d3ab177a97eb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44/training_log_c2fa12be-d7e0-46fa-82e6-d3ab177a97eb.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44/training_log_c2fa12be-d7e0-46fa-82e6-d3ab177a97eb.txt new file mode 100644 index 0000000000000000000000000000000000000000..f7b9a2e33fd61b7608cb19384ac2e0bf57992e8f --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44/training_log_c2fa12be-d7e0-46fa-82e6-d3ab177a97eb.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:05:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:05:03 2025 --- +[2025-07-05 09:05:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:05:03 2025 --- +[2025-07-05 09:05:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:05:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:05:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:05:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:05:03] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:05:03] 
[Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:05:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44 +[2025-07-05 09:05:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_44 +[2025-07-05 09:05:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
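+# Running accumulators for the train_loss value reported at each validation step:
+# the training loop below adds loss_train / train_seq_len and bumps the step count,
+# and both tensors are zeroed again right after every validation report.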
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-05 09:05:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-05 09:05:03] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:05:05] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:05:05] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:05:05] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:05:06] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 09:05:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:05:06] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:05:06] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:05:06] [Rank 0] PRINT: Model returns: 
+[2025-07-05 09:05:06] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:05:06] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:05:06] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:05:06] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:05:06] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:05:06] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:05:06] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:05:06] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:06:11] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:06:11] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:06:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:06:18] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:06:18] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:06:20] [Rank 0] step:21/10000 train_time:1547ms step_avg:73.66ms +[2025-07-05 09:06:20] [Rank 0] step:21/10000 train_time:1547ms step_avg:73.66ms +[2025-07-05 09:06:22] [Rank 0] step:41/10000 train_time:3099ms step_avg:75.59ms +[2025-07-05 09:06:22] [Rank 0] step:41/10000 train_time:3099ms step_avg:75.59ms +[2025-07-05 09:06:23] [Rank 0] step:61/10000 train_time:4556ms step_avg:74.69ms +[2025-07-05 09:06:23] [Rank 0] step:61/10000 train_time:4556ms step_avg:74.69ms +[2025-07-05 09:06:25] [Rank 0] step:81/10000 train_time:6012ms step_avg:74.22ms +[2025-07-05 09:06:25] [Rank 0] step:81/10000 train_time:6012ms step_avg:74.22ms +[2025-07-05 09:06:27] [Rank 0] step:101/10000 train_time:8122ms step_avg:80.42ms +[2025-07-05 09:06:27] [Rank 0] step:101/10000 train_time:8122ms step_avg:80.42ms +[2025-07-05 09:06:28] [Rank 0] step:121/10000 train_time:9581ms step_avg:79.18ms +[2025-07-05 09:06:28] [Rank 0] step:121/10000 train_time:9581ms step_avg:79.18ms +[2025-07-05 09:06:30] [Rank 0] step:141/10000 train_time:11042ms step_avg:78.31ms +[2025-07-05 09:06:30] [Rank 0] step:141/10000 train_time:11042ms step_avg:78.31ms +[2025-07-05 09:06:31] [Rank 0] step:161/10000 train_time:12504ms step_avg:77.67ms +[2025-07-05 09:06:31] [Rank 0] step:161/10000 train_time:12504ms step_avg:77.67ms +[2025-07-05 09:06:33] [Rank 0] step:181/10000 train_time:14226ms step_avg:78.60ms +[2025-07-05 09:06:33] [Rank 0] step:181/10000 train_time:14226ms step_avg:78.60ms +[2025-07-05 09:06:34] [Rank 0] step:201/10000 train_time:15672ms step_avg:77.97ms +[2025-07-05 09:06:34] [Rank 0] step:201/10000 train_time:15672ms step_avg:77.97ms +[2025-07-05 09:06:36] [Rank 0] step:221/10000 train_time:17138ms step_avg:77.55ms +[2025-07-05 09:06:36] [Rank 0] step:221/10000 train_time:17138ms step_avg:77.55ms +[2025-07-05 09:06:37] [Rank 0] step:241/10000 train_time:18606ms step_avg:77.20ms +[2025-07-05 09:06:37] [Rank 0] step:241/10000 train_time:18606ms step_avg:77.20ms +[2025-07-05 09:06:39] [Rank 0] step:261/10000 train_time:20071ms step_avg:76.90ms +[2025-07-05 09:06:39] [Rank 0] step:261/10000 train_time:20071ms step_avg:76.90ms +[2025-07-05 09:06:40] [Rank 0] step:281/10000 train_time:21576ms step_avg:76.78ms +[2025-07-05 09:06:40] [Rank 0] step:281/10000 train_time:21576ms step_avg:76.78ms +[2025-07-05 09:06:42] [Rank 0] step:301/10000 train_time:23215ms step_avg:77.13ms +[2025-07-05 09:06:42] [Rank 0] step:301/10000 train_time:23215ms step_avg:77.13ms +[2025-07-05 09:06:43] [Rank 0] step:321/10000 train_time:24825ms step_avg:77.34ms +[2025-07-05 09:06:43] [Rank 0] step:321/10000 train_time:24825ms step_avg:77.34ms +[2025-07-05 09:06:45] [Rank 0] step:341/10000 train_time:26292ms step_avg:77.10ms +[2025-07-05 09:06:45] [Rank 0] step:341/10000 train_time:26292ms step_avg:77.10ms +[2025-07-05 09:06:47] [Rank 0] step:361/10000 train_time:28015ms step_avg:77.60ms +[2025-07-05 09:06:47] [Rank 0] step:361/10000 train_time:28015ms step_avg:77.60ms +[2025-07-05 09:06:49] [Rank 0] step:381/10000 train_time:29982ms step_avg:78.69ms +[2025-07-05 09:06:49] [Rank 0] step:381/10000 train_time:29982ms step_avg:78.69ms +[2025-07-05 09:06:50] [Rank 0] step:401/10000 train_time:31449ms step_avg:78.43ms +[2025-07-05 09:06:50] [Rank 0] step:401/10000 train_time:31449ms step_avg:78.43ms +[2025-07-05 09:06:51] [Rank 0] step:421/10000 train_time:32918ms step_avg:78.19ms 
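The step_avg column in the lines above is simply cumulative train_time divided by the current step index, e.g. for the last entry:

train_time_ms, step = 32918, 421                    # taken from the log line above
print(f"step_avg:{train_time_ms / step:.2f}ms")     # -> step_avg:78.19ms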
+[2025-07-05 09:06:51] [Rank 0] step:421/10000 train_time:32918ms step_avg:78.19ms +[2025-07-05 09:06:53] [Rank 0] step:441/10000 train_time:34386ms step_avg:77.97ms +[2025-07-05 09:06:53] [Rank 0] step:441/10000 train_time:34386ms step_avg:77.97ms +[2025-07-05 09:06:54] [Rank 0] step:461/10000 train_time:35888ms step_avg:77.85ms +[2025-07-05 09:06:54] [Rank 0] step:461/10000 train_time:35888ms step_avg:77.85ms +[2025-07-05 09:06:56] [Rank 0] step:481/10000 train_time:37359ms step_avg:77.67ms +[2025-07-05 09:06:56] [Rank 0] step:481/10000 train_time:37359ms step_avg:77.67ms +[2025-07-05 09:06:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:06:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:06:58] [Rank 0] PRINT: step:500/10000 train_loss:4.9435 val_loss:2.0561 train_time:38826ms step_avg:77.65ms +[2025-07-05 09:06:58] [Rank 0] PRINT: step:500/10000 train_loss:4.9435 val_loss:2.0561 train_time:38826ms step_avg:77.65ms +[2025-07-05 09:06:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:06:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c7fece48aa15b94636d16cfb919e3e9c2aa742b8 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "ae10963c-5595-4548-a6f8-06c0c7919a12", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/training_log_ae10963c-5595-4548-a6f8-06c0c7919a12.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/training_log_ae10963c-5595-4548-a6f8-06c0c7919a12.txt new file mode 100644 index 0000000000000000000000000000000000000000..b1bf70cb1a33261e2a9a98f69cd0ffd87560bf64 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/training_log_ae10963c-5595-4548-a6f8-06c0c7919a12.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:32:28] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:32:28 2025 --- +[2025-07-05 09:32:28] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:32:28 2025 --- +[2025-07-05 09:32:28] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:32:28] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:32:28] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:32:28] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:32:28] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:32:28] 
[Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:32:28] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45 +[2025-07-05 09:32:28] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45 +[2025-07-05 09:32:28] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:32:28] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
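+# Launch sketch (assumed, not taken from this log; script name, GPU count and argument
+# values are placeholders): the RANK / LOCAL_RANK / WORLD_SIZE variables read via
+# os.environ.get(...) above are normally injected by torchrun, e.g.
+#   torchrun --standalone --nproc_per_node=4 train_gpt.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+# Each spawned process then enters dist.init_process_group(backend="nccl", ...) with the
+# rank and world size parsed from those variables, as done above.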
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
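+        # Hypothetical illustration of the split below (example text is invented):
+        # for cleaned_text = "What university did Alice attend? MIT", prompt becomes
+        # "What university did Alice attend?" and answer becomes "MIT"; expected_token
+        # is then the first GPT-2 token id of " MIT" (the answer is encoded with a
+        # leading space so the token matches how it would follow the question).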
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
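+# Worked example of the two schedules defined above, using this run's hyperparameters
+# (num_iterations=10000, cooldown_frac=0.8):
+#   get_lr: the multiplier stays at 1.0 while x = step/10000 < 0.2 (roughly the first
+#   2000 steps), then decays linearly toward 0.1; at step 6000, x = 0.6,
+#   w = (1 - 0.6)/0.8 = 0.5, so the multiplier is 0.5*1.0 + 0.5*0.1 = 0.55.
+#   get_window_size_blocks: at step 6000, 1728*0.6 = 1036.8 is rounded up to the next
+#   multiple of 128, giving a 1152-token window, i.e. 9 blocks of 128 tokens.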
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:32:28] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:32:28] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:32:28] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:32:28] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:32:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:32:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:32:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:32:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:32:31] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:32:31] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:32:31] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:32:31] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:32:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:32:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:32:32] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:32:32] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:32:32] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:32:32] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:32:32] [Rank 0] PRINT: Model returns: +[2025-07-05 09:32:32] [Rank 0] PRINT: Model returns: +[2025-07-05 09:32:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:32:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:32:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:32:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:32:32] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:32:32] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:32:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:32:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:32:32] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:32:32] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:32:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:32:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:32:32] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:32:32] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:32:32] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:32:32] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:33:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:33:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:33:36] [Rank 0] PRINT: Starting training... +[2025-07-05 09:33:36] [Rank 0] PRINT: Starting training... +[2025-07-05 09:33:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:33:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
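+A quick check of the divisibility warning above (a sketch; world_size=4 is inferred from
+the logged val_batch_size of 262144 and is not printed directly):
+    val_tokens     = 1_966_080
+    val_seq_len    = 65_536                         # 4*16*1024, from Hyperparameters
+    world_size     = 4                              # inferred: 262144 / 65536
+    val_batch_size = world_size * val_seq_len       # 262144
+    full_steps     = val_tokens // val_batch_size   # 7 validation batches per eval
+    leftover       = val_tokens - full_steps * val_batch_size   # 131072 tokens skipped
+So each validation pass runs 7 full batches and leaves 131072 tokens unevaluated, which
+is all the warning refers to.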
+[2025-07-05 09:33:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:33:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:33:45] [Rank 0] step:21/10000 train_time:1553ms step_avg:73.96ms +[2025-07-05 09:33:45] [Rank 0] step:21/10000 train_time:1553ms step_avg:73.96ms +[2025-07-05 09:33:46] [Rank 0] step:41/10000 train_time:3012ms step_avg:73.47ms +[2025-07-05 09:33:46] [Rank 0] step:41/10000 train_time:3012ms step_avg:73.47ms +[2025-07-05 09:33:48] [Rank 0] step:61/10000 train_time:4471ms step_avg:73.29ms +[2025-07-05 09:33:48] [Rank 0] step:61/10000 train_time:4471ms step_avg:73.29ms +[2025-07-05 09:33:49] [Rank 0] step:81/10000 train_time:5933ms step_avg:73.24ms +[2025-07-05 09:33:49] [Rank 0] step:81/10000 train_time:5933ms step_avg:73.24ms +[2025-07-05 09:33:51] [Rank 0] step:101/10000 train_time:8050ms step_avg:79.70ms +[2025-07-05 09:33:51] [Rank 0] step:101/10000 train_time:8050ms step_avg:79.70ms +[2025-07-05 09:33:53] [Rank 0] step:121/10000 train_time:9515ms step_avg:78.63ms +[2025-07-05 09:33:53] [Rank 0] step:121/10000 train_time:9515ms step_avg:78.63ms +[2025-07-05 09:33:54] [Rank 0] step:141/10000 train_time:10979ms step_avg:77.87ms +[2025-07-05 09:33:54] [Rank 0] step:141/10000 train_time:10979ms step_avg:77.87ms +[2025-07-05 09:33:56] [Rank 0] step:161/10000 train_time:12446ms step_avg:77.30ms +[2025-07-05 09:33:56] [Rank 0] step:161/10000 train_time:12446ms step_avg:77.30ms +[2025-07-05 09:33:57] [Rank 0] step:181/10000 train_time:14168ms step_avg:78.27ms +[2025-07-05 09:33:57] [Rank 0] step:181/10000 train_time:14168ms step_avg:78.27ms +[2025-07-05 09:33:59] [Rank 0] step:201/10000 train_time:15621ms step_avg:77.72ms +[2025-07-05 09:33:59] [Rank 0] step:201/10000 train_time:15621ms step_avg:77.72ms +[2025-07-05 09:34:00] [Rank 0] step:221/10000 train_time:17090ms step_avg:77.33ms +[2025-07-05 09:34:00] [Rank 0] step:221/10000 train_time:17090ms step_avg:77.33ms +[2025-07-05 09:34:02] [Rank 0] step:241/10000 train_time:18562ms step_avg:77.02ms +[2025-07-05 09:34:02] [Rank 0] step:241/10000 train_time:18562ms step_avg:77.02ms +[2025-07-05 09:34:03] [Rank 0] step:261/10000 train_time:20037ms step_avg:76.77ms +[2025-07-05 09:34:03] [Rank 0] step:261/10000 train_time:20037ms step_avg:76.77ms +[2025-07-05 09:34:05] [Rank 0] step:281/10000 train_time:21743ms step_avg:77.38ms +[2025-07-05 09:34:05] [Rank 0] step:281/10000 train_time:21743ms step_avg:77.38ms +[2025-07-05 09:34:07] [Rank 0] step:301/10000 train_time:23457ms step_avg:77.93ms +[2025-07-05 09:34:07] [Rank 0] step:301/10000 train_time:23457ms step_avg:77.93ms +[2025-07-05 09:34:08] [Rank 0] step:321/10000 train_time:25088ms step_avg:78.16ms +[2025-07-05 09:34:08] [Rank 0] step:321/10000 train_time:25088ms step_avg:78.16ms +[2025-07-05 09:34:10] [Rank 0] step:341/10000 train_time:26560ms step_avg:77.89ms +[2025-07-05 09:34:10] [Rank 0] step:341/10000 train_time:26560ms step_avg:77.89ms +[2025-07-05 09:34:12] [Rank 0] step:361/10000 train_time:28032ms step_avg:77.65ms +[2025-07-05 09:34:12] [Rank 0] step:361/10000 train_time:28032ms step_avg:77.65ms +[2025-07-05 09:34:13] [Rank 0] step:381/10000 train_time:29739ms step_avg:78.05ms +[2025-07-05 09:34:13] [Rank 0] step:381/10000 train_time:29739ms step_avg:78.05ms +[2025-07-05 09:34:14] [Rank 0] step:401/10000 train_time:31213ms step_avg:77.84ms +[2025-07-05 09:34:14] [Rank 0] step:401/10000 train_time:31213ms step_avg:77.84ms +[2025-07-05 09:34:16] [Rank 0] step:421/10000 train_time:32685ms step_avg:77.64ms 
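+A note on the timing lines above: step_avg is simply the accumulated train_time divided
+by the printed step index (see the logging print at the end of the training loop), e.g.
+for the step 421 entry:
+    train_time_ms, printed_step = 32685, 421
+    print(train_time_ms / printed_step)   # 77.63..., logged as step_avg:77.64ms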
+[2025-07-05 09:34:16] [Rank 0] step:421/10000 train_time:32685ms step_avg:77.64ms +[2025-07-05 09:34:17] [Rank 0] step:441/10000 train_time:34158ms step_avg:77.45ms +[2025-07-05 09:34:17] [Rank 0] step:441/10000 train_time:34158ms step_avg:77.45ms +[2025-07-05 09:34:19] [Rank 0] step:461/10000 train_time:35666ms step_avg:77.37ms +[2025-07-05 09:34:19] [Rank 0] step:461/10000 train_time:35666ms step_avg:77.37ms +[2025-07-05 09:34:20] [Rank 0] step:481/10000 train_time:37138ms step_avg:77.21ms +[2025-07-05 09:34:20] [Rank 0] step:481/10000 train_time:37138ms step_avg:77.21ms +[2025-07-05 09:34:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:34:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:34:23] [Rank 0] PRINT: step:500/10000 train_loss:4.9430 val_loss:2.0527 train_time:38612ms step_avg:77.22ms +[2025-07-05 09:34:23] [Rank 0] PRINT: step:500/10000 train_loss:4.9430 val_loss:2.0527 train_time:38612ms step_avg:77.22ms +[2025-07-05 09:34:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:34:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9fa6346f2926f65cbc8ecd4ea190f948ebc2fad0 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "cbf9b787-e306-4994-9dd2-2f5ac71952f3", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/training_log_cbf9b787-e306-4994-9dd2-2f5ac71952f3.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/training_log_cbf9b787-e306-4994-9dd2-2f5ac71952f3.txt new file mode 100644 index 0000000000000000000000000000000000000000..f8cadd7fa94df38e1f20bef469e1029654796dfc --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/training_log_cbf9b787-e306-4994-9dd2-2f5ac71952f3.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:59:35] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:59:35 2025 --- +[2025-07-05 09:59:35] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:59:35 2025 --- +[2025-07-05 09:59:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:59:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:59:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:59:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:59:35] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 09:59:35] 
[Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 09:59:35] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46 +[2025-07-05 09:59:35] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46 +[2025-07-05 09:59:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") # write each message to the log file once + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
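+# train_loss_sum / train_step_count accumulate the detached per-step training loss (scaled by 1/train_seq_len) on each rank between validation points; the validation block averages them and all-reduces the result to report train_loss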
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:59:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
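+# For reference, each run writes its artifacts into a per-run folder of roughly this shape
+# (illustrative sketch; names follow the code below and the plotting calls later in the script):
+#   logs_bios/qa_0704/mode_<optimizer_mode>_param_<parameterization>_lr_<adam_lr>_seed_<seed>/
+#       config.json                  # CLI args + hyperparameters, saved once at startup
+#       training_log_<uuid>.txt      # timestamped lines appended by print0
+#       per_class_loss_curves.png    # refreshed by plot_curves at each detailed evaluation
+#       per_class_acc_curves.png
+#       total_loss_curve.png
+#       total_acc_curve.png
+#       checkpoints/                 # only when save_checkpoint is enabled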
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the run's log file
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
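+# For reference, with num_iterations=10000 and cooldown_frac=0.8 the schedules defined above
+# behave roughly as follows (illustrative values only):
+#   get_lr(step): multiplier 1.0 for about the first 2000 steps, then linear decay,
+#                 e.g. ~0.55 at step 6000 and 0.1 at step 10000; each param group uses
+#                 lr = initial_lr * multiplier inside the training loop below.
+#   get_window_size_blocks(step): the attention window grows from 128 tokens at the start
+#                 to 1792 tokens (14 blocks of 128) by the end of training.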
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:59:35] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:59:35] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:59:35] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:59:35] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:59:37] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:59:37] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:59:37] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:59:37] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:59:37] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:59:37] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:59:38] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:59:38] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:59:38] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:59:38] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:59:38] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:59:38] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:59:38] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:59:38] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:59:38] [Rank 0] PRINT: Model returns: +[2025-07-05 09:59:38] [Rank 0] PRINT: Model returns: +[2025-07-05 09:59:38] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:59:38] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:59:38] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:59:38] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:59:38] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:59:38] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:59:38] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:59:38] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:59:38] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:59:38] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:59:38] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:59:38] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:59:38] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:59:38] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:59:38] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:59:38] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:00:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:00:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:00:49] [Rank 0] PRINT: Starting training... +[2025-07-05 10:00:49] [Rank 0] PRINT: Starting training... +[2025-07-05 10:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:00:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:00:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:00:58] [Rank 0] step:21/10000 train_time:1655ms step_avg:78.82ms +[2025-07-05 10:00:58] [Rank 0] step:21/10000 train_time:1655ms step_avg:78.82ms +[2025-07-05 10:00:59] [Rank 0] step:41/10000 train_time:3113ms step_avg:75.94ms +[2025-07-05 10:00:59] [Rank 0] step:41/10000 train_time:3113ms step_avg:75.94ms +[2025-07-05 10:01:01] [Rank 0] step:61/10000 train_time:4571ms step_avg:74.93ms +[2025-07-05 10:01:01] [Rank 0] step:61/10000 train_time:4571ms step_avg:74.93ms +[2025-07-05 10:01:02] [Rank 0] step:81/10000 train_time:6032ms step_avg:74.47ms +[2025-07-05 10:01:02] [Rank 0] step:81/10000 train_time:6032ms step_avg:74.47ms +[2025-07-05 10:01:04] [Rank 0] step:101/10000 train_time:7731ms step_avg:76.55ms +[2025-07-05 10:01:04] [Rank 0] step:101/10000 train_time:7731ms step_avg:76.55ms +[2025-07-05 10:01:05] [Rank 0] step:121/10000 train_time:9193ms step_avg:75.97ms +[2025-07-05 10:01:05] [Rank 0] step:121/10000 train_time:9193ms step_avg:75.97ms +[2025-07-05 10:01:07] [Rank 0] step:141/10000 train_time:10660ms step_avg:75.60ms +[2025-07-05 10:01:07] [Rank 0] step:141/10000 train_time:10660ms step_avg:75.60ms +[2025-07-05 10:01:08] [Rank 0] step:161/10000 train_time:12124ms step_avg:75.31ms +[2025-07-05 10:01:08] [Rank 0] step:161/10000 train_time:12124ms step_avg:75.31ms +[2025-07-05 10:01:10] [Rank 0] step:181/10000 train_time:13591ms step_avg:75.09ms +[2025-07-05 10:01:10] [Rank 0] step:181/10000 train_time:13591ms step_avg:75.09ms +[2025-07-05 10:01:11] [Rank 0] step:201/10000 train_time:15093ms step_avg:75.09ms +[2025-07-05 10:01:11] [Rank 0] step:201/10000 train_time:15093ms step_avg:75.09ms +[2025-07-05 10:01:13] [Rank 0] step:221/10000 train_time:16563ms step_avg:74.95ms +[2025-07-05 10:01:13] [Rank 0] step:221/10000 train_time:16563ms step_avg:74.95ms +[2025-07-05 10:01:14] [Rank 0] step:241/10000 train_time:18032ms step_avg:74.82ms +[2025-07-05 10:01:14] [Rank 0] step:241/10000 train_time:18032ms step_avg:74.82ms +[2025-07-05 10:01:16] [Rank 0] step:261/10000 train_time:19503ms step_avg:74.72ms +[2025-07-05 10:01:16] [Rank 0] step:261/10000 train_time:19503ms step_avg:74.72ms +[2025-07-05 10:01:18] [Rank 0] step:281/10000 train_time:21644ms step_avg:77.03ms +[2025-07-05 10:01:18] [Rank 0] step:281/10000 train_time:21644ms step_avg:77.03ms +[2025-07-05 10:01:19] [Rank 0] step:301/10000 train_time:23112ms step_avg:76.79ms +[2025-07-05 10:01:19] [Rank 0] step:301/10000 train_time:23112ms step_avg:76.79ms +[2025-07-05 10:01:21] [Rank 0] step:321/10000 train_time:24582ms step_avg:76.58ms +[2025-07-05 10:01:21] [Rank 0] step:321/10000 train_time:24582ms step_avg:76.58ms +[2025-07-05 10:01:22] [Rank 0] step:341/10000 train_time:26054ms step_avg:76.40ms +[2025-07-05 10:01:22] [Rank 0] step:341/10000 train_time:26054ms step_avg:76.40ms +[2025-07-05 10:01:24] [Rank 0] step:361/10000 train_time:27524ms step_avg:76.24ms +[2025-07-05 10:01:24] [Rank 0] step:361/10000 train_time:27524ms step_avg:76.24ms +[2025-07-05 10:01:26] [Rank 0] step:381/10000 train_time:29646ms step_avg:77.81ms +[2025-07-05 10:01:26] [Rank 0] step:381/10000 train_time:29646ms step_avg:77.81ms +[2025-07-05 10:01:27] [Rank 0] step:401/10000 train_time:31115ms step_avg:77.59ms +[2025-07-05 10:01:27] [Rank 0] step:401/10000 train_time:31115ms step_avg:77.59ms +[2025-07-05 10:01:29] [Rank 0] step:421/10000 train_time:32587ms step_avg:77.40ms 
+[2025-07-05 10:01:29] [Rank 0] step:421/10000 train_time:32587ms step_avg:77.40ms +[2025-07-05 10:01:30] [Rank 0] step:441/10000 train_time:34310ms step_avg:77.80ms +[2025-07-05 10:01:30] [Rank 0] step:441/10000 train_time:34310ms step_avg:77.80ms +[2025-07-05 10:01:32] [Rank 0] step:461/10000 train_time:36082ms step_avg:78.27ms +[2025-07-05 10:01:32] [Rank 0] step:461/10000 train_time:36082ms step_avg:78.27ms +[2025-07-05 10:01:34] [Rank 0] step:481/10000 train_time:37570ms step_avg:78.11ms +[2025-07-05 10:01:34] [Rank 0] step:481/10000 train_time:37570ms step_avg:78.11ms +[2025-07-05 10:01:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:01:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:01:36] [Rank 0] PRINT: step:500/10000 train_loss:4.9406 val_loss:2.0534 train_time:39039ms step_avg:78.08ms +[2025-07-05 10:01:36] [Rank 0] PRINT: step:500/10000 train_loss:4.9406 val_loss:2.0534 train_time:39039ms step_avg:78.08ms +[2025-07-05 10:01:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:01:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..921e9512ee8a3e79db857013b97d50dc7317fdaa --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "03b816f8-ea37-46af-a3dc-552b5ed1fea0", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47/training_log_03b816f8-ea37-46af-a3dc-552b5ed1fea0.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47/training_log_03b816f8-ea37-46af-a3dc-552b5ed1fea0.txt new file mode 100644 index 0000000000000000000000000000000000000000..a6df4bc7be58157214d120c5e141f1c6205846d9 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47/training_log_03b816f8-ea37-46af-a3dc-552b5ed1fea0.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:18:56] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:18:56 2025 --- +[2025-07-05 08:18:56] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:18:56 2025 --- +[2025-07-05 08:18:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:18:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:18:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:18:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:18:56] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:18:56] 
[Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:18:56] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47 +[2025-07-05 08:18:56] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_47 +[2025-07-05 08:18:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
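+# A minimal standalone sketch of the stable-then-decay rule implemented by get_lr above,
+# with this run's logged values (num_iterations=10000, cooldown_frac=0.8) inlined as
+# assumptions for illustration; it is not used anywhere in the training loop.
+def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    # Progress through training, clamped to [0, 1] exactly as get_lr does.
+    x = min(max(step / num_iterations, 0.0), 1.0)
+    if x < 1 - cooldown_frac:
+        return 1.0  # constant multiplier for roughly the first 2000 steps (first 20% of training)
+    w = (1 - x) / max(cooldown_frac, 1e-9)
+    return w * 1.0 + (1 - w) * 0.1  # linear decay from 1.0 down to 0.1 at the final step
+
+# e.g. _lr_multiplier_sketch(0) == 1.0, _lr_multiplier_sketch(6000) == 0.55,
+# _lr_multiplier_sketch(10000) == 0.1; each optimizer group's lr is its initial_lr times this factor.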
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:18:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
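+# For the CLI arguments recorded in this log (optimizer_mode=0, model_parameterization="qkvo",
+# adam_lr=0.001, seed=47), the run-directory template above and the run_folder_name rebuilt
+# below both resolve to "mode_0_param_qkvo_lr_0.001_seed_47" under logs_bios/qa_0704, matching
+# the run directory and config.json path printed at the top of this training log.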
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
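+        # the remainder after '?' (with the 'Answer:' markers already stripped) is treated as the reference answer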
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
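+# per-rank accumulator; together with train_step_count below it yields the average training loss
+# that is all-reduced and reported at each validation step, then reset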
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:18:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:18:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-05 08:18:57] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:18:57] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:18:59] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:18:59] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:18:59] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:18:59] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:18:59] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:18:59] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:19:00] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:19:00] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-05 08:19:00] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:19:00] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:19:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 08:19:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 08:19:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 08:19:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 08:19:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:19:00] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:19:00] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:19:00] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:20:07] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:20:07] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:20:07] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:20:07] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:20:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:20:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:20:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:20:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:20:17] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.57ms +[2025-07-05 08:20:17] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.57ms +[2025-07-05 08:20:18] [Rank 0] step:41/10000 train_time:3209ms step_avg:78.27ms +[2025-07-05 08:20:18] [Rank 0] step:41/10000 train_time:3209ms step_avg:78.27ms +[2025-07-05 08:20:20] [Rank 0] step:61/10000 train_time:4662ms step_avg:76.43ms +[2025-07-05 08:20:20] [Rank 0] step:61/10000 train_time:4662ms step_avg:76.43ms +[2025-07-05 08:20:21] [Rank 0] step:81/10000 train_time:6117ms step_avg:75.51ms +[2025-07-05 08:20:21] [Rank 0] step:81/10000 train_time:6117ms step_avg:75.51ms +[2025-07-05 08:20:23] [Rank 0] step:101/10000 train_time:7815ms step_avg:77.37ms +[2025-07-05 08:20:23] [Rank 0] step:101/10000 train_time:7815ms step_avg:77.37ms +[2025-07-05 08:20:24] [Rank 0] step:121/10000 train_time:9372ms step_avg:77.45ms +[2025-07-05 08:20:24] [Rank 0] step:121/10000 train_time:9372ms step_avg:77.45ms +[2025-07-05 08:20:26] [Rank 0] step:141/10000 train_time:10930ms step_avg:77.52ms +[2025-07-05 08:20:26] [Rank 0] step:141/10000 train_time:10930ms step_avg:77.52ms +[2025-07-05 08:20:27] [Rank 0] step:161/10000 train_time:12393ms step_avg:76.97ms +[2025-07-05 08:20:27] [Rank 0] step:161/10000 train_time:12393ms step_avg:76.97ms +[2025-07-05 08:20:29] [Rank 0] step:181/10000 train_time:13902ms step_avg:76.81ms +[2025-07-05 08:20:29] [Rank 0] step:181/10000 train_time:13902ms step_avg:76.81ms +[2025-07-05 08:20:31] [Rank 0] step:201/10000 train_time:15550ms step_avg:77.36ms +[2025-07-05 08:20:31] [Rank 0] step:201/10000 train_time:15550ms step_avg:77.36ms +[2025-07-05 08:20:32] [Rank 0] step:221/10000 train_time:17015ms step_avg:76.99ms +[2025-07-05 08:20:32] [Rank 0] step:221/10000 train_time:17015ms step_avg:76.99ms +[2025-07-05 08:20:34] [Rank 0] step:241/10000 train_time:18481ms step_avg:76.68ms +[2025-07-05 08:20:34] [Rank 0] step:241/10000 train_time:18481ms step_avg:76.68ms +[2025-07-05 08:20:35] [Rank 0] step:261/10000 train_time:19950ms step_avg:76.44ms +[2025-07-05 08:20:35] [Rank 0] step:261/10000 train_time:19950ms step_avg:76.44ms +[2025-07-05 08:20:37] [Rank 0] step:281/10000 train_time:22078ms step_avg:78.57ms +[2025-07-05 08:20:37] [Rank 0] step:281/10000 train_time:22078ms step_avg:78.57ms +[2025-07-05 08:20:39] [Rank 0] step:301/10000 train_time:23543ms step_avg:78.22ms +[2025-07-05 08:20:39] [Rank 0] step:301/10000 train_time:23543ms step_avg:78.22ms +[2025-07-05 08:20:40] [Rank 0] step:321/10000 train_time:25009ms step_avg:77.91ms +[2025-07-05 08:20:40] [Rank 0] step:321/10000 train_time:25009ms step_avg:77.91ms +[2025-07-05 08:20:42] [Rank 0] step:341/10000 train_time:26475ms step_avg:77.64ms +[2025-07-05 08:20:42] [Rank 0] step:341/10000 train_time:26475ms step_avg:77.64ms +[2025-07-05 08:20:44] [Rank 0] step:361/10000 train_time:27940ms step_avg:77.40ms +[2025-07-05 08:20:44] [Rank 0] step:361/10000 train_time:27940ms step_avg:77.40ms +[2025-07-05 08:20:45] [Rank 0] step:381/10000 train_time:30072ms step_avg:78.93ms +[2025-07-05 08:20:45] [Rank 0] step:381/10000 train_time:30072ms step_avg:78.93ms +[2025-07-05 08:20:47] [Rank 0] step:401/10000 train_time:31539ms step_avg:78.65ms +[2025-07-05 08:20:47] [Rank 0] step:401/10000 train_time:31539ms step_avg:78.65ms +[2025-07-05 08:20:48] [Rank 0] step:421/10000 train_time:33008ms step_avg:78.40ms 
+[2025-07-05 08:20:48] [Rank 0] step:421/10000 train_time:33008ms step_avg:78.40ms +[2025-07-05 08:20:50] [Rank 0] step:441/10000 train_time:34575ms step_avg:78.40ms +[2025-07-05 08:20:50] [Rank 0] step:441/10000 train_time:34575ms step_avg:78.40ms +[2025-07-05 08:20:52] [Rank 0] step:461/10000 train_time:36701ms step_avg:79.61ms +[2025-07-05 08:20:52] [Rank 0] step:461/10000 train_time:36701ms step_avg:79.61ms +[2025-07-05 08:20:53] [Rank 0] step:481/10000 train_time:38167ms step_avg:79.35ms +[2025-07-05 08:20:53] [Rank 0] step:481/10000 train_time:38167ms step_avg:79.35ms +[2025-07-05 08:20:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:20:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:20:56] [Rank 0] PRINT: step:500/10000 train_loss:4.9411 val_loss:2.0542 train_time:39632ms step_avg:79.26ms +[2025-07-05 08:20:56] [Rank 0] PRINT: step:500/10000 train_loss:4.9411 val_loss:2.0542 train_time:39632ms step_avg:79.26ms +[2025-07-05 08:20:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:20:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..dbc341bc45bf6c83aacf03c5f56dfbbe329e4f37 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "639e2168-a8b5-43df-aaa9-48bb9e73d4a5", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/training_log_639e2168-a8b5-43df-aaa9-48bb9e73d4a5.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/training_log_639e2168-a8b5-43df-aaa9-48bb9e73d4a5.txt new file mode 100644 index 0000000000000000000000000000000000000000..3f4fb1cc694ece9b20b2b2ad925b8f5724ef5367 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/training_log_639e2168-a8b5-43df-aaa9-48bb9e73d4a5.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:46:22] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:46:22 2025 --- +[2025-07-05 08:46:22] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:46:22 2025 --- +[2025-07-05 08:46:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:46:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:46:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:46:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:46:22] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:46:22] 
[Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:46:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48 +[2025-07-05 08:46:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48 +[2025-07-05 08:46:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
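# [Editor's note] Illustrative sketch, not part of the original logged script: the
# stable-then-decay schedule implemented by get_lr() above keeps the LR multiplier
# at 1.0 for the first (1 - cooldown_frac) of training, then decays it linearly to
# 0.1 over the final cooldown_frac of the run. With the logged settings
# (num_iterations=10000, cooldown_frac=0.8) the multiplier stays flat until step
# 2000 and ramps down to 0.1 at step 10000. The name lr_multiplier below is local
# to this sketch.
def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped training progress, as in get_lr
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

# e.g. lr_multiplier(0) == 1.0, lr_multiplier(2000) ≈ 1.0,
#      lr_multiplier(6000) ≈ 0.55, lr_multiplier(10000) == 0.1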
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:46:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
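# [Editor's note] Worked example, not part of the original logged script: with the
# CLI args recorded in this run's config.json (optimizer_mode=0,
# model_parameterization='qkvo', adam_lr=0.001, seed=48), the folder-name template
# used below resolves to the directory reported in the log header,
# logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48.
_example_folder_name = f"mode_{0}_param_{'qkvo'}_lr_{0.001}_seed_{48}"
assert _example_folder_name == "mode_0_param_qkvo_lr_0.001_seed_48"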
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
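+# --- Annotation (illustrative, not part of the original script) ----------------
+# With num_iterations=10000 and cooldown_frac=0.8, get_lr() above keeps the LR
+# multiplier at 1.0 for the first 2000 steps and then decays it linearly to 0.1
+# by the end of training; get_window_size_blocks() grows the attention window
+# from 128 tokens at the start to 1792 tokens at the end (1728*x rounded up to
+# the next multiple of 128). train_loss_sum (above) and train_step_count (below)
+# accumulate per-step losses between validation points, so the reported
+# train_loss is an average over that interval.
+# --------------------------------------------------------------------------------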
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:46:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:46:22] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:46:24] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:46:24] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:46:24] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:46:25] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 08:46:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:46:25] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:46:25] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:46:25] [Rank 0] PRINT: Model returns:
+[2025-07-05 08:46:25] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:46:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 08:46:25] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 08:46:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 08:46:25] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 08:46:25] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:46:25] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:46:25] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:47:32] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:47:32] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:47:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
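+# --- Annotation (worked example for the warning above) -------------------------
+# val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144 tokens, so each
+# validation pass runs val_tokens // val_batch_size = 1966080 // 262144 = 7 full
+# steps and the trailing 131072 tokens (half a batch) are skipped every time.
+# --------------------------------------------------------------------------------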
+[2025-07-05 08:47:39] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:47:41] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.49ms
+[2025-07-05 08:47:43] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.19ms
+[2025-07-05 08:47:44] [Rank 0] step:61/10000 train_time:4659ms step_avg:76.37ms
+[2025-07-05 08:47:46] [Rank 0] step:81/10000 train_time:6115ms step_avg:75.50ms
+[2025-07-05 08:47:48] [Rank 0] step:101/10000 train_time:8229ms step_avg:81.48ms
+[2025-07-05 08:47:49] [Rank 0] step:121/10000 train_time:9683ms step_avg:80.03ms
+[2025-07-05 08:47:51] [Rank 0] step:141/10000 train_time:11141ms step_avg:79.02ms
+[2025-07-05 08:47:52] [Rank 0] step:161/10000 train_time:12602ms step_avg:78.28ms
+[2025-07-05 08:47:54] [Rank 0] step:181/10000 train_time:14112ms step_avg:77.97ms
+[2025-07-05 08:47:56] [Rank 0] step:201/10000 train_time:16164ms step_avg:80.42ms
+[2025-07-05 08:47:57] [Rank 0] step:221/10000 train_time:17627ms step_avg:79.76ms
+[2025-07-05 08:47:59] [Rank 0] step:241/10000 train_time:19093ms step_avg:79.22ms
+[2025-07-05 08:48:00] [Rank 0] step:261/10000 train_time:20560ms step_avg:78.77ms
+[2025-07-05 08:48:02] [Rank 0] step:281/10000 train_time:22695ms step_avg:80.77ms
+[2025-07-05 08:48:04] [Rank 0] step:301/10000 train_time:24162ms step_avg:80.27ms
+[2025-07-05 08:48:05] [Rank 0] step:321/10000 train_time:25629ms step_avg:79.84ms
+[2025-07-05 08:48:07] [Rank 0] step:341/10000 train_time:27092ms step_avg:79.45ms
+[2025-07-05 08:48:08] [Rank 0] step:361/10000 train_time:28557ms step_avg:79.11ms
+[2025-07-05 08:48:10] [Rank 0] step:381/10000 train_time:30261ms step_avg:79.42ms
+[2025-07-05 08:48:11] [Rank 0] step:401/10000 train_time:31727ms step_avg:79.12ms
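+# --- Annotation (illustrative) --------------------------------------------------
+# step_avg in these entries is the cumulative training time divided by the number
+# of completed steps, e.g. 31727 ms / 401 steps ~= 79.12 ms. At world_size *
+# train_seq_len = 4 * 12288 = 49152 tokens per step, that is roughly 0.6M training
+# tokens per second in aggregate.
+# --------------------------------------------------------------------------------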
+[2025-07-05 08:48:13] [Rank 0] step:421/10000 train_time:33191ms step_avg:78.84ms +[2025-07-05 08:48:14] [Rank 0] step:441/10000 train_time:34657ms step_avg:78.59ms +[2025-07-05 08:48:14] [Rank 0] step:441/10000 train_time:34657ms step_avg:78.59ms +[2025-07-05 08:48:16] [Rank 0] step:461/10000 train_time:36773ms step_avg:79.77ms +[2025-07-05 08:48:16] [Rank 0] step:461/10000 train_time:36773ms step_avg:79.77ms +[2025-07-05 08:48:18] [Rank 0] step:481/10000 train_time:38239ms step_avg:79.50ms +[2025-07-05 08:48:18] [Rank 0] step:481/10000 train_time:38239ms step_avg:79.50ms +[2025-07-05 08:48:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:48:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:48:20] [Rank 0] PRINT: step:500/10000 train_loss:4.9439 val_loss:2.0532 train_time:39704ms step_avg:79.41ms +[2025-07-05 08:48:20] [Rank 0] PRINT: step:500/10000 train_loss:4.9439 val_loss:2.0532 train_time:39704ms step_avg:79.41ms +[2025-07-05 08:48:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:48:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0aaadcdfb1cb86164d6af7247b97bf57d2b982d7 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "762edfa4-fa14-4431-a05d-7ac41ba73122", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/training_log_762edfa4-fa14-4431-a05d-7ac41ba73122.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/training_log_762edfa4-fa14-4431-a05d-7ac41ba73122.txt new file mode 100644 index 0000000000000000000000000000000000000000..32bd6b9e839314fbc896743385fd1ee529cc7a6b --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/training_log_762edfa4-fa14-4431-a05d-7ac41ba73122.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:13:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:13:57 2025 --- +[2025-07-05 09:13:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:13:57 2025 --- +[2025-07-05 09:13:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:13:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:13:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:13:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:13:57] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:13:57] 
[Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:13:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49 +[2025-07-05 09:13:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49 +[2025-07-05 09:13:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
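+# --- Editor's illustrative sketch (not part of the original run's code) ---
+# A minimal sanity check of the two schedules defined above, assuming the logged
+# hyperparameters (num_iterations=10000, cooldown_frac=0.8): get_lr holds the LR
+# multiplier at 1.0 for the first 20% of training (2000 steps) and then decays it
+# linearly toward 0.1, while the attention window grows from 1 block (128 tokens)
+# at step 0 to 14 blocks (1792 tokens) by the final step.
+assert abs(get_lr(0) - 1.0) < 1e-6                      # plateau phase: x = 0.0 < 1 - cooldown_frac
+assert abs(get_lr(6000) - 0.55) < 1e-6                  # halfway through cooldown: 0.5*1.0 + 0.5*0.1
+assert abs(get_lr(args.num_iterations) - 0.1) < 1e-6    # end of training
+assert get_window_size_blocks(0).item() == 1            # 128-token window at step 0
+assert get_window_size_blocks(args.num_iterations).item() == 14  # 1792-token window at the end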
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:13:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
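+# Editor's note (descriptive comment, not part of the original script): the
+# accumulators train_loss_sum / train_step_count below track per-step training
+# loss between validation points. The main loop that follows alternates
+# training steps with periodic evaluation (at step 0, every val_loss_every
+# steps, and the final step); on the master process it additionally runs
+# run_detailed_evaluation to record per-class loss/FTA into `history` and
+# regenerate the plots before the training timer is restarted.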
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:13:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:13:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:13:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:13:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:13:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:13:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:13:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:13:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:13:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:13:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:14:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:14:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:14:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:14:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:14:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:14:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:14:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:14:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:14:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:14:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:14:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:14:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:14:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:14:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:14:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:14:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:14:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:14:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:14:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:14:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:14:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:14:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:14:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:14:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:14:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:14:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:15:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:15:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:15:09] [Rank 0] PRINT: Starting training... +[2025-07-05 09:15:09] [Rank 0] PRINT: Starting training... +[2025-07-05 09:15:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:15:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:15:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:15:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:15:17] [Rank 0] step:21/10000 train_time:1552ms step_avg:73.90ms +[2025-07-05 09:15:17] [Rank 0] step:21/10000 train_time:1552ms step_avg:73.90ms +[2025-07-05 09:15:19] [Rank 0] step:41/10000 train_time:3003ms step_avg:73.25ms +[2025-07-05 09:15:19] [Rank 0] step:41/10000 train_time:3003ms step_avg:73.25ms +[2025-07-05 09:15:20] [Rank 0] step:61/10000 train_time:4458ms step_avg:73.08ms +[2025-07-05 09:15:20] [Rank 0] step:61/10000 train_time:4458ms step_avg:73.08ms +[2025-07-05 09:15:22] [Rank 0] step:81/10000 train_time:5914ms step_avg:73.01ms +[2025-07-05 09:15:22] [Rank 0] step:81/10000 train_time:5914ms step_avg:73.01ms +[2025-07-05 09:15:24] [Rank 0] step:101/10000 train_time:8029ms step_avg:79.49ms +[2025-07-05 09:15:24] [Rank 0] step:101/10000 train_time:8029ms step_avg:79.49ms +[2025-07-05 09:15:25] [Rank 0] step:121/10000 train_time:9485ms step_avg:78.39ms +[2025-07-05 09:15:25] [Rank 0] step:121/10000 train_time:9485ms step_avg:78.39ms +[2025-07-05 09:15:27] [Rank 0] step:141/10000 train_time:10945ms step_avg:77.62ms +[2025-07-05 09:15:27] [Rank 0] step:141/10000 train_time:10945ms step_avg:77.62ms +[2025-07-05 09:15:28] [Rank 0] step:161/10000 train_time:12406ms step_avg:77.05ms +[2025-07-05 09:15:28] [Rank 0] step:161/10000 train_time:12406ms step_avg:77.05ms +[2025-07-05 09:15:30] [Rank 0] step:181/10000 train_time:13922ms step_avg:76.92ms +[2025-07-05 09:15:30] [Rank 0] step:181/10000 train_time:13922ms step_avg:76.92ms +[2025-07-05 09:15:31] [Rank 0] step:201/10000 train_time:15570ms step_avg:77.46ms +[2025-07-05 09:15:31] [Rank 0] step:201/10000 train_time:15570ms step_avg:77.46ms +[2025-07-05 09:15:33] [Rank 0] step:221/10000 train_time:17035ms step_avg:77.08ms +[2025-07-05 09:15:33] [Rank 0] step:221/10000 train_time:17035ms step_avg:77.08ms +[2025-07-05 09:15:34] [Rank 0] step:241/10000 train_time:18501ms step_avg:76.77ms +[2025-07-05 09:15:34] [Rank 0] step:241/10000 train_time:18501ms step_avg:76.77ms +[2025-07-05 09:15:36] [Rank 0] step:261/10000 train_time:19967ms step_avg:76.50ms +[2025-07-05 09:15:36] [Rank 0] step:261/10000 train_time:19967ms step_avg:76.50ms +[2025-07-05 09:15:38] [Rank 0] step:281/10000 train_time:22110ms step_avg:78.68ms +[2025-07-05 09:15:38] [Rank 0] step:281/10000 train_time:22110ms step_avg:78.68ms +[2025-07-05 09:15:39] [Rank 0] step:301/10000 train_time:23576ms step_avg:78.32ms +[2025-07-05 09:15:39] [Rank 0] step:301/10000 train_time:23576ms step_avg:78.32ms +[2025-07-05 09:15:41] [Rank 0] step:321/10000 train_time:25045ms step_avg:78.02ms +[2025-07-05 09:15:41] [Rank 0] step:321/10000 train_time:25045ms step_avg:78.02ms +[2025-07-05 09:15:42] [Rank 0] step:341/10000 train_time:26513ms step_avg:77.75ms +[2025-07-05 09:15:42] [Rank 0] step:341/10000 train_time:26513ms step_avg:77.75ms +[2025-07-05 09:15:44] [Rank 0] step:361/10000 train_time:28237ms step_avg:78.22ms +[2025-07-05 09:15:44] [Rank 0] step:361/10000 train_time:28237ms step_avg:78.22ms +[2025-07-05 09:15:46] [Rank 0] step:381/10000 train_time:30111ms step_avg:79.03ms +[2025-07-05 09:15:46] [Rank 0] step:381/10000 train_time:30111ms step_avg:79.03ms +[2025-07-05 09:15:47] [Rank 0] step:401/10000 train_time:31578ms step_avg:78.75ms +[2025-07-05 09:15:47] [Rank 0] step:401/10000 train_time:31578ms step_avg:78.75ms +[2025-07-05 09:15:49] [Rank 0] step:421/10000 train_time:33045ms step_avg:78.49ms 
+[2025-07-05 09:15:49] [Rank 0] step:421/10000 train_time:33045ms step_avg:78.49ms +[2025-07-05 09:15:50] [Rank 0] step:441/10000 train_time:34512ms step_avg:78.26ms +[2025-07-05 09:15:50] [Rank 0] step:441/10000 train_time:34512ms step_avg:78.26ms +[2025-07-05 09:15:52] [Rank 0] step:461/10000 train_time:36231ms step_avg:78.59ms +[2025-07-05 09:15:52] [Rank 0] step:461/10000 train_time:36231ms step_avg:78.59ms +[2025-07-05 09:15:53] [Rank 0] step:481/10000 train_time:37699ms step_avg:78.38ms +[2025-07-05 09:15:53] [Rank 0] step:481/10000 train_time:37699ms step_avg:78.38ms +[2025-07-05 09:15:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:15:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:15:56] [Rank 0] PRINT: step:500/10000 train_loss:4.9445 val_loss:2.0539 train_time:39168ms step_avg:78.34ms +[2025-07-05 09:15:56] [Rank 0] PRINT: step:500/10000 train_loss:4.9445 val_loss:2.0539 train_time:39168ms step_avg:78.34ms +[2025-07-05 09:15:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:15:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..90df087f9a4e6207e1440f4444b87b48ac52640e --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "1fd16153-b19e-4408-ad41-007a9302c0ae", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50/training_log_1fd16153-b19e-4408-ad41-007a9302c0ae.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50/training_log_1fd16153-b19e-4408-ad41-007a9302c0ae.txt new file mode 100644 index 0000000000000000000000000000000000000000..a86baa9f4fb306637f33d3d3104792c3c87c2553 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50/training_log_1fd16153-b19e-4408-ad41-007a9302c0ae.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:41:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:41:21 2025 --- +[2025-07-05 09:41:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:41:21 2025 --- +[2025-07-05 09:41:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:41:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:41:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:41:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:41:21] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:41:21] 
[Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:41:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50 +[2025-07-05 09:41:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_50 +[2025-07-05 09:41:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:41:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
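+# Note: train_loss_sum / train_step_count accumulate the per-step training loss
+# between validation points (each step adds loss_train.detach() / args.train_seq_len
+# and increments the step count); at every validation they are averaged,
+# all-reduced across ranks, reported as train_loss, and then reset to zero.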
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:41:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:41:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:41:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:41:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:41:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:41:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:41:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:41:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:41:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:41:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:41:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:41:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:41:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:41:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:41:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:41:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:41:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:41:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:41:24] [Rank 0] PRINT: Model returns: +[2025-07-05 09:41:24] [Rank 0] PRINT: Model returns: +[2025-07-05 09:41:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:41:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:41:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:41:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:41:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:41:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:41:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:41:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:41:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:41:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:41:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:41:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:41:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:41:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:41:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:41:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:42:31] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:42:31] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:42:31] [Rank 0] PRINT: Starting training... +[2025-07-05 09:42:31] [Rank 0] PRINT: Starting training... +[2025-07-05 09:42:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:42:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:42:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:42:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:42:39] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-05 09:42:39] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-05 09:42:41] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.18ms +[2025-07-05 09:42:41] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.18ms +[2025-07-05 09:42:42] [Rank 0] step:61/10000 train_time:4454ms step_avg:73.01ms +[2025-07-05 09:42:42] [Rank 0] step:61/10000 train_time:4454ms step_avg:73.01ms +[2025-07-05 09:42:44] [Rank 0] step:81/10000 train_time:5911ms step_avg:72.98ms +[2025-07-05 09:42:44] [Rank 0] step:81/10000 train_time:5911ms step_avg:72.98ms +[2025-07-05 09:42:45] [Rank 0] step:101/10000 train_time:7614ms step_avg:75.39ms +[2025-07-05 09:42:45] [Rank 0] step:101/10000 train_time:7614ms step_avg:75.39ms +[2025-07-05 09:42:47] [Rank 0] step:121/10000 train_time:9070ms step_avg:74.96ms +[2025-07-05 09:42:47] [Rank 0] step:121/10000 train_time:9070ms step_avg:74.96ms +[2025-07-05 09:42:48] [Rank 0] step:141/10000 train_time:10530ms step_avg:74.68ms +[2025-07-05 09:42:48] [Rank 0] step:141/10000 train_time:10530ms step_avg:74.68ms +[2025-07-05 09:42:50] [Rank 0] step:161/10000 train_time:11994ms step_avg:74.50ms +[2025-07-05 09:42:50] [Rank 0] step:161/10000 train_time:11994ms step_avg:74.50ms +[2025-07-05 09:42:52] [Rank 0] step:181/10000 train_time:14148ms step_avg:78.17ms +[2025-07-05 09:42:52] [Rank 0] step:181/10000 train_time:14148ms step_avg:78.17ms +[2025-07-05 09:42:53] [Rank 0] step:201/10000 train_time:15595ms step_avg:77.59ms +[2025-07-05 09:42:53] [Rank 0] step:201/10000 train_time:15595ms step_avg:77.59ms +[2025-07-05 09:42:55] [Rank 0] step:221/10000 train_time:17060ms step_avg:77.20ms +[2025-07-05 09:42:55] [Rank 0] step:221/10000 train_time:17060ms step_avg:77.20ms +[2025-07-05 09:42:56] [Rank 0] step:241/10000 train_time:18525ms step_avg:76.87ms +[2025-07-05 09:42:56] [Rank 0] step:241/10000 train_time:18525ms step_avg:76.87ms +[2025-07-05 09:42:58] [Rank 0] step:261/10000 train_time:19991ms step_avg:76.59ms +[2025-07-05 09:42:58] [Rank 0] step:261/10000 train_time:19991ms step_avg:76.59ms +[2025-07-05 09:43:00] [Rank 0] step:281/10000 train_time:22123ms step_avg:78.73ms +[2025-07-05 09:43:00] [Rank 0] step:281/10000 train_time:22123ms step_avg:78.73ms +[2025-07-05 09:43:01] [Rank 0] step:301/10000 train_time:23591ms step_avg:78.38ms +[2025-07-05 09:43:01] [Rank 0] step:301/10000 train_time:23591ms step_avg:78.38ms +[2025-07-05 09:43:03] [Rank 0] step:321/10000 train_time:25058ms step_avg:78.06ms +[2025-07-05 09:43:03] [Rank 0] step:321/10000 train_time:25058ms step_avg:78.06ms +[2025-07-05 09:43:04] [Rank 0] step:341/10000 train_time:26525ms step_avg:77.79ms +[2025-07-05 09:43:04] [Rank 0] step:341/10000 train_time:26525ms step_avg:77.79ms +[2025-07-05 09:43:06] [Rank 0] step:361/10000 train_time:28044ms step_avg:77.68ms +[2025-07-05 09:43:06] [Rank 0] step:361/10000 train_time:28044ms step_avg:77.68ms +[2025-07-05 09:43:08] [Rank 0] step:381/10000 train_time:30126ms step_avg:79.07ms +[2025-07-05 09:43:08] [Rank 0] step:381/10000 train_time:30126ms step_avg:79.07ms +[2025-07-05 09:43:09] [Rank 0] step:401/10000 train_time:31593ms step_avg:78.79ms +[2025-07-05 09:43:09] [Rank 0] step:401/10000 train_time:31593ms step_avg:78.79ms +[2025-07-05 09:43:11] [Rank 0] step:421/10000 train_time:33063ms step_avg:78.53ms 
+[2025-07-05 09:43:11] [Rank 0] step:421/10000 train_time:33063ms step_avg:78.53ms +[2025-07-05 09:43:12] [Rank 0] step:441/10000 train_time:34532ms step_avg:78.30ms +[2025-07-05 09:43:12] [Rank 0] step:441/10000 train_time:34532ms step_avg:78.30ms +[2025-07-05 09:43:14] [Rank 0] step:461/10000 train_time:36654ms step_avg:79.51ms +[2025-07-05 09:43:14] [Rank 0] step:461/10000 train_time:36654ms step_avg:79.51ms +[2025-07-05 09:43:16] [Rank 0] step:481/10000 train_time:38120ms step_avg:79.25ms +[2025-07-05 09:43:16] [Rank 0] step:481/10000 train_time:38120ms step_avg:79.25ms +[2025-07-05 09:43:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:43:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:43:18] [Rank 0] PRINT: step:500/10000 train_loss:4.9446 val_loss:2.0569 train_time:39587ms step_avg:79.17ms +[2025-07-05 09:43:18] [Rank 0] PRINT: step:500/10000 train_loss:4.9446 val_loss:2.0569 train_time:39587ms step_avg:79.17ms +[2025-07-05 09:43:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:43:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..869fc05c194fab68606ebd59c742b03d33752e5d --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "d6d53afc-ce86-4406-86ed-e6a6e084876f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51/training_log_d6d53afc-ce86-4406-86ed-e6a6e084876f.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51/training_log_d6d53afc-ce86-4406-86ed-e6a6e084876f.txt new file mode 100644 index 0000000000000000000000000000000000000000..3041c979253c035342ef951487a2368073d20a38 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51/training_log_d6d53afc-ce86-4406-86ed-e6a6e084876f.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:08:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:08:39 2025 --- +[2025-07-05 10:08:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:08:39 2025 --- +[2025-07-05 10:08:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 10:08:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 10:08:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:08:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:08:39] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:08:39] 
[Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:08:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51 +[2025-07-05 10:08:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_51 +[2025-07-05 10:08:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
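+# Running accumulators for the average training loss reported at each validation step;
+# the average is all-reduced across ranks when logged and both counters are reset to zero
+# after every validation pass (see the validation section of the training loop below).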
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:08:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
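+# Bookkeeping note for the training loop below (descriptive comment, matching the code as written):
+# train_loss_sum accumulates the training loss (normalized by args.train_seq_len) and
+# train_step_count the number of contributing steps between validation checkpoints; both are
+# reset after each validation block so the logged train_loss is a per-interval mean. Inside the
+# loop each optimizer group's lr is rescaled as group["initial_lr"] * get_lr(step); with this
+# run's num_iterations=10000 and cooldown_frac=0.8 the multiplier stays at 1.0 for steps 0-1999,
+# then decays linearly toward 0.1 as training approaches the final step.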
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:08:39] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:08:39] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:08:39] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:08:39] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:08:41] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:08:41] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:08:41] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:08:41] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:08:41] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:08:41] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:08:42] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:08:42] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:08:42] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:08:42] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:08:42] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:08:42] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:08:42] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:08:42] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:08:42] [Rank 0] PRINT: Model returns: +[2025-07-05 10:08:42] [Rank 0] PRINT: Model returns: +[2025-07-05 10:08:42] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:08:42] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:08:42] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:08:42] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:08:42] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:08:42] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:08:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:08:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:08:42] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:08:42] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:08:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:08:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:08:42] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:08:42] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:08:42] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:08:42] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:09:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:09:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:09:49] [Rank 0] PRINT: Starting training... +[2025-07-05 10:09:49] [Rank 0] PRINT: Starting training... +[2025-07-05 10:09:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:09:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:09:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:09:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:09:58] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.22ms +[2025-07-05 10:09:58] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.22ms +[2025-07-05 10:10:00] [Rank 0] step:41/10000 train_time:3199ms step_avg:78.02ms +[2025-07-05 10:10:00] [Rank 0] step:41/10000 train_time:3199ms step_avg:78.02ms +[2025-07-05 10:10:01] [Rank 0] step:61/10000 train_time:4652ms step_avg:76.26ms +[2025-07-05 10:10:01] [Rank 0] step:61/10000 train_time:4652ms step_avg:76.26ms +[2025-07-05 10:10:03] [Rank 0] step:81/10000 train_time:6104ms step_avg:75.36ms +[2025-07-05 10:10:03] [Rank 0] step:81/10000 train_time:6104ms step_avg:75.36ms +[2025-07-05 10:10:05] [Rank 0] step:101/10000 train_time:8217ms step_avg:81.35ms +[2025-07-05 10:10:05] [Rank 0] step:101/10000 train_time:8217ms step_avg:81.35ms +[2025-07-05 10:10:06] [Rank 0] step:121/10000 train_time:9673ms step_avg:79.94ms +[2025-07-05 10:10:06] [Rank 0] step:121/10000 train_time:9673ms step_avg:79.94ms +[2025-07-05 10:10:08] [Rank 0] step:141/10000 train_time:11132ms step_avg:78.95ms +[2025-07-05 10:10:08] [Rank 0] step:141/10000 train_time:11132ms step_avg:78.95ms +[2025-07-05 10:10:09] [Rank 0] step:161/10000 train_time:12588ms step_avg:78.19ms +[2025-07-05 10:10:09] [Rank 0] step:161/10000 train_time:12588ms step_avg:78.19ms +[2025-07-05 10:10:11] [Rank 0] step:181/10000 train_time:14308ms step_avg:79.05ms +[2025-07-05 10:10:11] [Rank 0] step:181/10000 train_time:14308ms step_avg:79.05ms +[2025-07-05 10:10:13] [Rank 0] step:201/10000 train_time:16183ms step_avg:80.51ms +[2025-07-05 10:10:13] [Rank 0] step:201/10000 train_time:16183ms step_avg:80.51ms +[2025-07-05 10:10:14] [Rank 0] step:221/10000 train_time:17644ms step_avg:79.84ms +[2025-07-05 10:10:14] [Rank 0] step:221/10000 train_time:17644ms step_avg:79.84ms +[2025-07-05 10:10:16] [Rank 0] step:241/10000 train_time:19109ms step_avg:79.29ms +[2025-07-05 10:10:16] [Rank 0] step:241/10000 train_time:19109ms step_avg:79.29ms +[2025-07-05 10:10:17] [Rank 0] step:261/10000 train_time:20578ms step_avg:78.84ms +[2025-07-05 10:10:17] [Rank 0] step:261/10000 train_time:20578ms step_avg:78.84ms +[2025-07-05 10:10:19] [Rank 0] step:281/10000 train_time:22714ms step_avg:80.83ms +[2025-07-05 10:10:19] [Rank 0] step:281/10000 train_time:22714ms step_avg:80.83ms +[2025-07-05 10:10:21] [Rank 0] step:301/10000 train_time:24180ms step_avg:80.33ms +[2025-07-05 10:10:21] [Rank 0] step:301/10000 train_time:24180ms step_avg:80.33ms +[2025-07-05 10:10:22] [Rank 0] step:321/10000 train_time:25646ms step_avg:79.90ms +[2025-07-05 10:10:22] [Rank 0] step:321/10000 train_time:25646ms step_avg:79.90ms +[2025-07-05 10:10:24] [Rank 0] step:341/10000 train_time:27115ms step_avg:79.52ms +[2025-07-05 10:10:24] [Rank 0] step:341/10000 train_time:27115ms step_avg:79.52ms +[2025-07-05 10:10:26] [Rank 0] step:361/10000 train_time:28843ms step_avg:79.90ms +[2025-07-05 10:10:26] [Rank 0] step:361/10000 train_time:28843ms step_avg:79.90ms +[2025-07-05 10:10:27] [Rank 0] step:381/10000 train_time:30714ms step_avg:80.62ms +[2025-07-05 10:10:27] [Rank 0] step:381/10000 train_time:30714ms step_avg:80.62ms +[2025-07-05 10:10:29] [Rank 0] step:401/10000 train_time:32184ms step_avg:80.26ms +[2025-07-05 10:10:29] [Rank 0] step:401/10000 train_time:32184ms step_avg:80.26ms +[2025-07-05 10:10:30] [Rank 0] step:421/10000 train_time:33647ms step_avg:79.92ms 
+[2025-07-05 10:10:30] [Rank 0] step:421/10000 train_time:33647ms step_avg:79.92ms +[2025-07-05 10:10:32] [Rank 0] step:441/10000 train_time:35114ms step_avg:79.62ms +[2025-07-05 10:10:32] [Rank 0] step:441/10000 train_time:35114ms step_avg:79.62ms +[2025-07-05 10:10:34] [Rank 0] step:461/10000 train_time:37243ms step_avg:80.79ms +[2025-07-05 10:10:34] [Rank 0] step:461/10000 train_time:37243ms step_avg:80.79ms +[2025-07-05 10:10:35] [Rank 0] step:481/10000 train_time:38710ms step_avg:80.48ms +[2025-07-05 10:10:35] [Rank 0] step:481/10000 train_time:38710ms step_avg:80.48ms +[2025-07-05 10:10:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:10:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:10:38] [Rank 0] PRINT: step:500/10000 train_loss:4.9444 val_loss:2.0570 train_time:40175ms step_avg:80.35ms +[2025-07-05 10:10:38] [Rank 0] PRINT: step:500/10000 train_loss:4.9444 val_loss:2.0570 train_time:40175ms step_avg:80.35ms +[2025-07-05 10:10:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:10:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f733564859ca529670cd8985bbee92e9bcf944a8 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "501f1bd1-a3e1-4cd0-ae33-22319eb75428", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/training_log_501f1bd1-a3e1-4cd0-ae33-22319eb75428.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/training_log_501f1bd1-a3e1-4cd0-ae33-22319eb75428.txt new file mode 100644 index 0000000000000000000000000000000000000000..09b995bbf9eee39dd05f9a72d9619623cbb1f7dc --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/training_log_501f1bd1-a3e1-4cd0-ae33-22319eb75428.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:06:30] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:06:30 2025 --- +[2025-07-05 08:06:30] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:06:30 2025 --- +[2025-07-05 08:06:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:06:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:06:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:06:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:06:30] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:06:30] 
[Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:06:30] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42 +[2025-07-05 08:06:30] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42 +[2025-07-05 08:06:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
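+# Worked example of the schedules defined above, using the values in Hyperparameters
+# (num_iterations=10000, cooldown_frac=0.8): get_lr(step) is a multiplier on each param
+# group's initial_lr that stays at 1.0 for the first 2000 steps and then anneals linearly
+# toward 0.1, e.g. get_lr(0)=1.0, get_lr(2000)=1.0, get_lr(6000)=0.55, get_lr(10000)=0.1.
+# Likewise, get_window_size_blocks grows the attention window from 128 tokens at step 0
+# to 1792 tokens (the first multiple of 128 at or above 1728) by the final step.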
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:06:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
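+# Illustrative note on the run-directory naming above: the folder name is derived entirely
+# from the CLI arguments, so hypothetical values such as optimizer_mode=3,
+# model_parameterization='qkvo', adam_lr=0.001, seed=7 would resolve to
+# .../logs_bios/qa_0704/mode_3_param_qkvo_lr_0.001_seed_7; config.json and the
+# training_log_<uuid>.txt file are then written into that folder below.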
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
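+        # Illustrative example (hypothetical QA text, not from the dataset): for the
+        # cleaned text "What city was Alice born in? Paris", the prompt becomes
+        # "What city was Alice born in?" and the answer "Paris"; the expected first
+        # token is tokenizer.encode(' Paris', add_special_tokens=False)[0], i.e. the
+        # answer is encoded with a leading space to match the training context.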
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
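+# Descriptive note: train_loss_sum (above) and train_step_count (next line) accumulate the
+# detached per-step loss (scaled by 1/args.train_seq_len) and the number of optimizer steps
+# taken since the last validation; the validation block averages them, all-reduces the result
+# across ranks, reports it as train_loss, and then resets both buffers to zero.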
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:06:31] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:06:31] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:06:31] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:06:31] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:06:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:06:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:06:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:06:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:06:33] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:06:33] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:06:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:06:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:06:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:06:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:06:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:06:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:06:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:06:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:06:34] [Rank 0] PRINT: Model returns: +[2025-07-05 08:06:34] [Rank 0] PRINT: Model returns: +[2025-07-05 08:06:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:06:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:06:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:06:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:06:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:06:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:06:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:06:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:06:34] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:06:34] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:06:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:06:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:06:34] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:06:34] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:06:34] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:06:34] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:08:15] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:08:15] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:08:15] [Rank 0] PRINT: Starting training... +[2025-07-05 08:08:15] [Rank 0] PRINT: Starting training... +[2025-07-05 08:08:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:08:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
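+# The divisibility warning above in numbers (values taken from the logged hyperparameters; the
+# world size of 4 is inferred from 262144 / 65536, it is not printed directly in this log):
+#   val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144
+#   val_tokens / val_batch_size = 1966080 / 262144 = 7.5
+# so each validation pass runs 7 full steps and the trailing 131072 tokens are never scored.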
+[2025-07-05 08:08:22] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:08:22] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:08:24] [Rank 0] step:21/10000 train_time:1654ms step_avg:78.78ms +[2025-07-05 08:08:24] [Rank 0] step:21/10000 train_time:1654ms step_avg:78.78ms +[2025-07-05 08:08:25] [Rank 0] step:41/10000 train_time:3217ms step_avg:78.45ms +[2025-07-05 08:08:25] [Rank 0] step:41/10000 train_time:3217ms step_avg:78.45ms +[2025-07-05 08:08:27] [Rank 0] step:61/10000 train_time:4678ms step_avg:76.69ms +[2025-07-05 08:08:27] [Rank 0] step:61/10000 train_time:4678ms step_avg:76.69ms +[2025-07-05 08:08:28] [Rank 0] step:81/10000 train_time:6244ms step_avg:77.08ms +[2025-07-05 08:08:28] [Rank 0] step:81/10000 train_time:6244ms step_avg:77.08ms +[2025-07-05 08:08:30] [Rank 0] step:101/10000 train_time:7749ms step_avg:76.73ms +[2025-07-05 08:08:30] [Rank 0] step:101/10000 train_time:7749ms step_avg:76.73ms +[2025-07-05 08:08:31] [Rank 0] step:121/10000 train_time:9216ms step_avg:76.16ms +[2025-07-05 08:08:31] [Rank 0] step:121/10000 train_time:9216ms step_avg:76.16ms +[2025-07-05 08:08:33] [Rank 0] step:141/10000 train_time:10685ms step_avg:75.78ms +[2025-07-05 08:08:33] [Rank 0] step:141/10000 train_time:10685ms step_avg:75.78ms +[2025-07-05 08:08:34] [Rank 0] step:161/10000 train_time:12154ms step_avg:75.49ms +[2025-07-05 08:08:34] [Rank 0] step:161/10000 train_time:12154ms step_avg:75.49ms +[2025-07-05 08:08:36] [Rank 0] step:181/10000 train_time:13620ms step_avg:75.25ms +[2025-07-05 08:08:36] [Rank 0] step:181/10000 train_time:13620ms step_avg:75.25ms +[2025-07-05 08:08:38] [Rank 0] step:201/10000 train_time:15734ms step_avg:78.28ms +[2025-07-05 08:08:38] [Rank 0] step:201/10000 train_time:15734ms step_avg:78.28ms +[2025-07-05 08:08:39] [Rank 0] step:221/10000 train_time:17202ms step_avg:77.84ms +[2025-07-05 08:08:39] [Rank 0] step:221/10000 train_time:17202ms step_avg:77.84ms +[2025-07-05 08:08:41] [Rank 0] step:241/10000 train_time:18671ms step_avg:77.47ms +[2025-07-05 08:08:41] [Rank 0] step:241/10000 train_time:18671ms step_avg:77.47ms +[2025-07-05 08:08:42] [Rank 0] step:261/10000 train_time:20356ms step_avg:77.99ms +[2025-07-05 08:08:42] [Rank 0] step:261/10000 train_time:20356ms step_avg:77.99ms +[2025-07-05 08:08:44] [Rank 0] step:281/10000 train_time:22180ms step_avg:78.93ms +[2025-07-05 08:08:44] [Rank 0] step:281/10000 train_time:22180ms step_avg:78.93ms +[2025-07-05 08:08:46] [Rank 0] step:301/10000 train_time:23650ms step_avg:78.57ms +[2025-07-05 08:08:46] [Rank 0] step:301/10000 train_time:23650ms step_avg:78.57ms +[2025-07-05 08:08:47] [Rank 0] step:321/10000 train_time:25123ms step_avg:78.26ms +[2025-07-05 08:08:47] [Rank 0] step:321/10000 train_time:25123ms step_avg:78.26ms +[2025-07-05 08:08:49] [Rank 0] step:341/10000 train_time:26594ms step_avg:77.99ms +[2025-07-05 08:08:49] [Rank 0] step:341/10000 train_time:26594ms step_avg:77.99ms +[2025-07-05 08:08:51] [Rank 0] step:361/10000 train_time:28117ms step_avg:77.89ms +[2025-07-05 08:08:51] [Rank 0] step:361/10000 train_time:28117ms step_avg:77.89ms +[2025-07-05 08:08:52] [Rank 0] step:381/10000 train_time:30197ms step_avg:79.26ms +[2025-07-05 08:08:52] [Rank 0] step:381/10000 train_time:30197ms step_avg:79.26ms +[2025-07-05 08:08:54] [Rank 0] step:401/10000 train_time:31669ms step_avg:78.97ms +[2025-07-05 08:08:54] [Rank 0] step:401/10000 train_time:31669ms step_avg:78.97ms +[2025-07-05 08:08:55] [Rank 0] step:421/10000 train_time:33139ms step_avg:78.71ms 
+[2025-07-05 08:08:55] [Rank 0] step:421/10000 train_time:33139ms step_avg:78.71ms +[2025-07-05 08:08:57] [Rank 0] step:441/10000 train_time:34611ms step_avg:78.48ms +[2025-07-05 08:08:57] [Rank 0] step:441/10000 train_time:34611ms step_avg:78.48ms +[2025-07-05 08:08:58] [Rank 0] step:461/10000 train_time:36418ms step_avg:79.00ms +[2025-07-05 08:08:58] [Rank 0] step:461/10000 train_time:36418ms step_avg:79.00ms +[2025-07-05 08:09:00] [Rank 0] step:481/10000 train_time:37991ms step_avg:78.98ms +[2025-07-05 08:09:00] [Rank 0] step:481/10000 train_time:37991ms step_avg:78.98ms +[2025-07-05 08:09:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:09:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:09:02] [Rank 0] PRINT: step:500/10000 train_loss:3.3840 val_loss:1.6123 train_time:39463ms step_avg:78.93ms +[2025-07-05 08:09:02] [Rank 0] PRINT: step:500/10000 train_loss:3.3840 val_loss:1.6123 train_time:39463ms step_avg:78.93ms +[2025-07-05 08:09:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:09:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b5313fe6fd51d1bea7eafbfc70e2d3402c591fc8 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "678c79d5-b1d7-4892-b306-a0b2a90c3d1b", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/training_log_678c79d5-b1d7-4892-b306-a0b2a90c3d1b.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/training_log_678c79d5-b1d7-4892-b306-a0b2a90c3d1b.txt new file mode 100644 index 0000000000000000000000000000000000000000..3f4b89a23732b3c7c377beec14fda2070c391e55 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/training_log_678c79d5-b1d7-4892-b306-a0b2a90c3d1b.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:35:28] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:35:28 2025 --- +[2025-07-05 08:35:28] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:35:28 2025 --- +[2025-07-05 08:35:28] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:35:28] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:35:28] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:35:28] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:35:28] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:35:28] 
[Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:35:28] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43 +[2025-07-05 08:35:28] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43 +[2025-07-05 08:35:28] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
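+# Editor's note -- illustrative sketch, not part of the original script. Just before the training
+# loop, it is worth spelling out how the schedules defined above behave under this run's settings
+# (num_iterations=10000, cooldown_frac=0.8). Each step the loop sets
+#     group["lr"] = group["initial_lr"] * get_lr(step)
+# and get_lr gives a stable-then-linear-decay multiplier:
+#     step  1000 -> x=0.10 < 0.2 (stable phase)            -> 1.0
+#     step  6000 -> x=0.60, w=(1-0.6)/0.8=0.5              -> 0.5*1.0 + 0.5*0.1 = 0.55
+#     step 10000 -> x=1.00, w=0.0                          -> 0.1
+# Similarly, get_window_size_blocks grows the attention window with training progress, from 128
+# tokens (1 block of 128) at step 0 up to next_multiple_of_n(1728, n=128) = 1792 tokens
+# (14 blocks) at the final step, and Muon's momentum is separately warmed up from 0.85 to 0.95
+# over the first 300 steps.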
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:35:28] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
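+# Editor's note -- illustrative, not taken from the original log. The DDP setup above reads RANK,
+# LOCAL_RANK and WORLD_SIZE from the environment, which is what torchrun exports, so a run of
+# this script would typically be launched along these lines (script name hypothetical, flag
+# values chosen only as an example):
+#     torchrun --standalone --nproc_per_node=8 train_gpt_muon.py \
+#         --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+# With those example flags, the f-string above resolves the run directory to
+# .../logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42.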
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
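+# ---------------------------------------------------------------------------
+# Illustrative sketch (editorial addition, not part of the original logged
+# run): both schedules defined above are plain functions of training progress
+# x = step / num_iterations. With this run's config (num_iterations=10000,
+# cooldown_frac=0.8), get_lr holds the LR multiplier at 1.0 for the first 20%
+# of steps and then decays it linearly to 0.1 by the final step, while
+# get_window_size_blocks grows the attention window from 128 tokens (1 block)
+# at step 0 to 1792 tokens (14 blocks, i.e. 1728 rounded up to a multiple of
+# 128) at the last step.
+def _sketch_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # training progress, clamped as in get_lr
+    if x < 1 - cooldown_frac:
+        return 1.0                                 # stable phase
+    w = (1 - x) / cooldown_frac                    # goes 1 -> 0 across the cooldown
+    return w * 1.0 + (1 - w) * 0.1                 # linear decay towards 10% of the base LR
+
+assert _sketch_lr_multiplier(1000) == 1.0
+assert abs(_sketch_lr_multiplier(6000) - 0.55) < 1e-9
+assert abs(_sketch_lr_multiplier(10000) - 0.1) < 1e-9
+# ---------------------------------------------------------------------------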
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:35:28] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:35:28] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:35:28] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:35:28] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:35:30] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:35:30] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:35:30] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:35:30] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:35:30] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:35:30] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:35:31] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:35:31] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:35:31] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:35:31] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:35:31] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:35:31] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:35:31] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:35:31] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:35:31] [Rank 0] PRINT: Model returns: +[2025-07-05 08:35:31] [Rank 0] PRINT: Model returns: +[2025-07-05 08:35:31] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:35:31] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:35:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:35:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:35:31] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:35:31] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:35:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:35:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:35:31] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:35:31] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:35:31] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:35:31] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:35:31] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:35:31] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:35:31] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:35:31] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:36:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:36:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:36:36] [Rank 0] PRINT: Starting training... +[2025-07-05 08:36:36] [Rank 0] PRINT: Starting training... +[2025-07-05 08:36:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:36:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
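+(Note on the warning above: val_batch_size is world_size * val_seq_len; with val_seq_len = 65536 this implies world_size = 4 and val_batch_size = 262144. Since 1966080 / 262144 = 7.5, each evaluation runs val_num_steps = 7 full validation batches and the trailing 131072 tokens are skipped.)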
+[2025-07-05 08:36:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:36:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:36:45] [Rank 0] step:21/10000 train_time:1548ms step_avg:73.70ms +[2025-07-05 08:36:45] [Rank 0] step:21/10000 train_time:1548ms step_avg:73.70ms +[2025-07-05 08:36:46] [Rank 0] step:41/10000 train_time:3004ms step_avg:73.26ms +[2025-07-05 08:36:46] [Rank 0] step:41/10000 train_time:3004ms step_avg:73.26ms +[2025-07-05 08:36:48] [Rank 0] step:61/10000 train_time:4462ms step_avg:73.15ms +[2025-07-05 08:36:48] [Rank 0] step:61/10000 train_time:4462ms step_avg:73.15ms +[2025-07-05 08:36:49] [Rank 0] step:81/10000 train_time:5924ms step_avg:73.14ms +[2025-07-05 08:36:49] [Rank 0] step:81/10000 train_time:5924ms step_avg:73.14ms +[2025-07-05 08:36:51] [Rank 0] step:101/10000 train_time:7628ms step_avg:75.52ms +[2025-07-05 08:36:51] [Rank 0] step:101/10000 train_time:7628ms step_avg:75.52ms +[2025-07-05 08:36:52] [Rank 0] step:121/10000 train_time:9093ms step_avg:75.15ms +[2025-07-05 08:36:52] [Rank 0] step:121/10000 train_time:9093ms step_avg:75.15ms +[2025-07-05 08:36:54] [Rank 0] step:141/10000 train_time:10559ms step_avg:74.89ms +[2025-07-05 08:36:54] [Rank 0] step:141/10000 train_time:10559ms step_avg:74.89ms +[2025-07-05 08:36:55] [Rank 0] step:161/10000 train_time:12026ms step_avg:74.70ms +[2025-07-05 08:36:55] [Rank 0] step:161/10000 train_time:12026ms step_avg:74.70ms +[2025-07-05 08:36:57] [Rank 0] step:181/10000 train_time:13494ms step_avg:74.55ms +[2025-07-05 08:36:57] [Rank 0] step:181/10000 train_time:13494ms step_avg:74.55ms +[2025-07-05 08:36:59] [Rank 0] step:201/10000 train_time:15597ms step_avg:77.60ms +[2025-07-05 08:36:59] [Rank 0] step:201/10000 train_time:15597ms step_avg:77.60ms +[2025-07-05 08:37:00] [Rank 0] step:221/10000 train_time:17063ms step_avg:77.21ms +[2025-07-05 08:37:00] [Rank 0] step:221/10000 train_time:17063ms step_avg:77.21ms +[2025-07-05 08:37:02] [Rank 0] step:241/10000 train_time:18531ms step_avg:76.89ms +[2025-07-05 08:37:02] [Rank 0] step:241/10000 train_time:18531ms step_avg:76.89ms +[2025-07-05 08:37:03] [Rank 0] step:261/10000 train_time:20000ms step_avg:76.63ms +[2025-07-05 08:37:03] [Rank 0] step:261/10000 train_time:20000ms step_avg:76.63ms +[2025-07-05 08:37:05] [Rank 0] step:281/10000 train_time:21502ms step_avg:76.52ms +[2025-07-05 08:37:05] [Rank 0] step:281/10000 train_time:21502ms step_avg:76.52ms +[2025-07-05 08:37:06] [Rank 0] step:301/10000 train_time:22972ms step_avg:76.32ms +[2025-07-05 08:37:06] [Rank 0] step:301/10000 train_time:22972ms step_avg:76.32ms +[2025-07-05 08:37:08] [Rank 0] step:321/10000 train_time:24440ms step_avg:76.14ms +[2025-07-05 08:37:08] [Rank 0] step:321/10000 train_time:24440ms step_avg:76.14ms +[2025-07-05 08:37:09] [Rank 0] step:341/10000 train_time:25909ms step_avg:75.98ms +[2025-07-05 08:37:09] [Rank 0] step:341/10000 train_time:25909ms step_avg:75.98ms +[2025-07-05 08:37:11] [Rank 0] step:361/10000 train_time:27839ms step_avg:77.12ms +[2025-07-05 08:37:11] [Rank 0] step:361/10000 train_time:27839ms step_avg:77.12ms +[2025-07-05 08:37:13] [Rank 0] step:381/10000 train_time:29342ms step_avg:77.01ms +[2025-07-05 08:37:13] [Rank 0] step:381/10000 train_time:29342ms step_avg:77.01ms +[2025-07-05 08:37:14] [Rank 0] step:401/10000 train_time:30868ms step_avg:76.98ms +[2025-07-05 08:37:14] [Rank 0] step:401/10000 train_time:30868ms step_avg:76.98ms +[2025-07-05 08:37:16] [Rank 0] step:421/10000 train_time:32338ms step_avg:76.81ms 
+[2025-07-05 08:37:16] [Rank 0] step:421/10000 train_time:32338ms step_avg:76.81ms +[2025-07-05 08:37:17] [Rank 0] step:441/10000 train_time:33808ms step_avg:76.66ms +[2025-07-05 08:37:17] [Rank 0] step:441/10000 train_time:33808ms step_avg:76.66ms +[2025-07-05 08:37:19] [Rank 0] step:461/10000 train_time:35919ms step_avg:77.91ms +[2025-07-05 08:37:19] [Rank 0] step:461/10000 train_time:35919ms step_avg:77.91ms +[2025-07-05 08:37:21] [Rank 0] step:481/10000 train_time:37388ms step_avg:77.73ms +[2025-07-05 08:37:21] [Rank 0] step:481/10000 train_time:37388ms step_avg:77.73ms +[2025-07-05 08:37:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:37:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:37:23] [Rank 0] PRINT: step:500/10000 train_loss:3.3874 val_loss:1.6145 train_time:38859ms step_avg:77.72ms +[2025-07-05 08:37:23] [Rank 0] PRINT: step:500/10000 train_loss:3.3874 val_loss:1.6145 train_time:38859ms step_avg:77.72ms +[2025-07-05 08:37:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:37:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fa05174c95a1c38bd9a4b70e285d747bd98bc8b6 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "35f112db-61fe-481d-86dd-0caa858bb18b", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44/training_log_35f112db-61fe-481d-86dd-0caa858bb18b.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44/training_log_35f112db-61fe-481d-86dd-0caa858bb18b.txt new file mode 100644 index 0000000000000000000000000000000000000000..cd1a0293df5c021b215ec72bb8850f039737e3f5 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44/training_log_35f112db-61fe-481d-86dd-0caa858bb18b.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:02:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:02:31 2025 --- +[2025-07-05 09:02:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:02:31 2025 --- +[2025-07-05 09:02:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:02:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:02:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:02:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:02:31] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:02:31] 
[Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:02:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44 +[2025-07-05 09:02:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_44 +[2025-07-05 09:02:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
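+# A quick illustration of the stable-then-decay schedule implemented by get_lr()
+# above (values computed from the defaults num_iterations=10000, cooldown_frac=0.8;
+# shown only as a sketch, not taken from this run's output):
+#   step     0 -> x = 0.00 -> 1.00   (stable phase, x < 1 - cooldown_frac)
+#   step  2000 -> x = 0.20 -> 1.00   (cooldown boundary, w = 1)
+#   step  6000 -> x = 0.60 -> 0.55   (w = 0.5 -> 0.5*1.0 + 0.5*0.1)
+#   step 10000 -> x = 1.00 -> 0.10   (fully decayed)
+# During training each optimizer group then uses lr = initial_lr * get_lr(step).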
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:02:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
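+# The run directory name above is assembled purely from the parsed CLI flags, so
+# every (optimizer_mode, model_parameterization, adam_lr, seed) combination gets
+# its own folder for config.json, the training log and the evaluation plots.
+# As a sketch (assuming these example flags, which are not read from this log):
+#   --optimizer_mode 3 --model_parameterization qkvo --adam_lr 0.001 --seed 7
+#   -> run folder "mode_3_param_qkvo_lr_0.001_seed_7" under logs_bios/qa_0704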
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
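+# --- Editor's sketch (added annotation, not part of the original logged script): a minimal,
+# side-effect-free sanity check of the get_lr schedule defined above, assuming this run's
+# Hyperparameters (num_iterations=10000, cooldown_frac=0.8). get_lr returns the LR multiplier:
+# 1.0 through the stable phase (roughly the first 20% of steps), then a linear decay to 0.1.
+if master_process:
+    for _step, _expected in [(0, 1.0), (2000, 1.0), (6000, 0.55), (10000, 0.1)]:
+        assert abs(get_lr(_step) - _expected) < 1e-6, (_step, get_lr(_step))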
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:02:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:02:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:02:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:02:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:02:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:02:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:02:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:02:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:02:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:02:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:02:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:02:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:02:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:02:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:02:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:02:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:02:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:02:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:02:35] [Rank 0] PRINT: Model returns: +[2025-07-05 09:02:35] [Rank 0] PRINT: Model returns: +[2025-07-05 09:02:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:02:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:02:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:02:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:02:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:02:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:02:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:02:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:02:35] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:02:35] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:02:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:02:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:02:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:02:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:02:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:02:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:03:51] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:03:51] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:03:51] [Rank 0] PRINT: Starting training... +[2025-07-05 09:03:51] [Rank 0] PRINT: Starting training... +[2025-07-05 09:03:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:03:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:03:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:03:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:04:01] [Rank 0] step:21/10000 train_time:1961ms step_avg:93.38ms +[2025-07-05 09:04:01] [Rank 0] step:21/10000 train_time:1961ms step_avg:93.38ms +[2025-07-05 09:04:02] [Rank 0] step:41/10000 train_time:3420ms step_avg:83.41ms +[2025-07-05 09:04:02] [Rank 0] step:41/10000 train_time:3420ms step_avg:83.41ms +[2025-07-05 09:04:04] [Rank 0] step:61/10000 train_time:4881ms step_avg:80.01ms +[2025-07-05 09:04:04] [Rank 0] step:61/10000 train_time:4881ms step_avg:80.01ms +[2025-07-05 09:04:05] [Rank 0] step:81/10000 train_time:6346ms step_avg:78.34ms +[2025-07-05 09:04:05] [Rank 0] step:81/10000 train_time:6346ms step_avg:78.34ms +[2025-07-05 09:04:07] [Rank 0] step:101/10000 train_time:8051ms step_avg:79.71ms +[2025-07-05 09:04:07] [Rank 0] step:101/10000 train_time:8051ms step_avg:79.71ms +[2025-07-05 09:04:08] [Rank 0] step:121/10000 train_time:9518ms step_avg:78.66ms +[2025-07-05 09:04:08] [Rank 0] step:121/10000 train_time:9518ms step_avg:78.66ms +[2025-07-05 09:04:10] [Rank 0] step:141/10000 train_time:10986ms step_avg:77.91ms +[2025-07-05 09:04:10] [Rank 0] step:141/10000 train_time:10986ms step_avg:77.91ms +[2025-07-05 09:04:11] [Rank 0] step:161/10000 train_time:12454ms step_avg:77.35ms +[2025-07-05 09:04:11] [Rank 0] step:161/10000 train_time:12454ms step_avg:77.35ms +[2025-07-05 09:04:13] [Rank 0] step:181/10000 train_time:13971ms step_avg:77.19ms +[2025-07-05 09:04:13] [Rank 0] step:181/10000 train_time:13971ms step_avg:77.19ms +[2025-07-05 09:04:14] [Rank 0] step:201/10000 train_time:15623ms step_avg:77.73ms +[2025-07-05 09:04:14] [Rank 0] step:201/10000 train_time:15623ms step_avg:77.73ms +[2025-07-05 09:04:16] [Rank 0] step:221/10000 train_time:17193ms step_avg:77.80ms +[2025-07-05 09:04:16] [Rank 0] step:221/10000 train_time:17193ms step_avg:77.80ms +[2025-07-05 09:04:18] [Rank 0] step:241/10000 train_time:18661ms step_avg:77.43ms +[2025-07-05 09:04:18] [Rank 0] step:241/10000 train_time:18661ms step_avg:77.43ms +[2025-07-05 09:04:19] [Rank 0] step:261/10000 train_time:20130ms step_avg:77.13ms +[2025-07-05 09:04:19] [Rank 0] step:261/10000 train_time:20130ms step_avg:77.13ms +[2025-07-05 09:04:21] [Rank 0] step:281/10000 train_time:21835ms step_avg:77.71ms +[2025-07-05 09:04:21] [Rank 0] step:281/10000 train_time:21835ms step_avg:77.71ms +[2025-07-05 09:04:22] [Rank 0] step:301/10000 train_time:23305ms step_avg:77.43ms +[2025-07-05 09:04:22] [Rank 0] step:301/10000 train_time:23305ms step_avg:77.43ms +[2025-07-05 09:04:24] [Rank 0] step:321/10000 train_time:24774ms step_avg:77.18ms +[2025-07-05 09:04:24] [Rank 0] step:321/10000 train_time:24774ms step_avg:77.18ms +[2025-07-05 09:04:25] [Rank 0] step:341/10000 train_time:26347ms step_avg:77.26ms +[2025-07-05 09:04:25] [Rank 0] step:341/10000 train_time:26347ms step_avg:77.26ms +[2025-07-05 09:04:27] [Rank 0] step:361/10000 train_time:27815ms step_avg:77.05ms +[2025-07-05 09:04:27] [Rank 0] step:361/10000 train_time:27815ms step_avg:77.05ms +[2025-07-05 09:04:29] [Rank 0] step:381/10000 train_time:29944ms step_avg:78.59ms +[2025-07-05 09:04:29] [Rank 0] step:381/10000 train_time:29944ms step_avg:78.59ms +[2025-07-05 09:04:30] [Rank 0] step:401/10000 train_time:31413ms step_avg:78.34ms +[2025-07-05 09:04:30] [Rank 0] step:401/10000 train_time:31413ms step_avg:78.34ms +[2025-07-05 09:04:32] [Rank 0] step:421/10000 train_time:32884ms step_avg:78.11ms 
+[2025-07-05 09:04:32] [Rank 0] step:421/10000 train_time:32884ms step_avg:78.11ms +[2025-07-05 09:04:33] [Rank 0] step:441/10000 train_time:34355ms step_avg:77.90ms +[2025-07-05 09:04:33] [Rank 0] step:441/10000 train_time:34355ms step_avg:77.90ms +[2025-07-05 09:04:35] [Rank 0] step:461/10000 train_time:36092ms step_avg:78.29ms +[2025-07-05 09:04:35] [Rank 0] step:461/10000 train_time:36092ms step_avg:78.29ms +[2025-07-05 09:04:36] [Rank 0] step:481/10000 train_time:37564ms step_avg:78.10ms +[2025-07-05 09:04:36] [Rank 0] step:481/10000 train_time:37564ms step_avg:78.10ms +[2025-07-05 09:04:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:04:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:04:39] [Rank 0] PRINT: step:500/10000 train_loss:3.3880 val_loss:1.6162 train_time:39148ms step_avg:78.30ms +[2025-07-05 09:04:39] [Rank 0] PRINT: step:500/10000 train_loss:3.3880 val_loss:1.6162 train_time:39148ms step_avg:78.30ms +[2025-07-05 09:04:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:04:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..34595f8ad91844111a51c1e724eb0ef6ddbe40c1 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "9edee4de-4386-44c7-97ea-2ef03e229fe0", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/training_log_9edee4de-4386-44c7-97ea-2ef03e229fe0.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/training_log_9edee4de-4386-44c7-97ea-2ef03e229fe0.txt new file mode 100644 index 0000000000000000000000000000000000000000..241bf0bd89044c30886cad45439021b974b32eee --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/training_log_9edee4de-4386-44c7-97ea-2ef03e229fe0.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:30:11] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:30:11 2025 --- +[2025-07-05 09:30:11] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:30:11 2025 --- +[2025-07-05 09:30:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:30:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:30:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:30:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:30:11] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:30:11] 
[Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:30:11] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45 +[2025-07-05 09:30:11] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45 +[2025-07-05 09:30:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
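+# Worked example of the schedules defined above (a sketch; assumes the default
+# Hyperparameters num_iterations=10000 and cooldown_frac=0.8):
+#   get_lr(0)     = 1.00  (constant phase: x = 0.0 < 1 - cooldown_frac = 0.2)
+#   get_lr(2000)  = 1.00  (entering cooldown with w = 1.0)
+#   get_lr(6000)  = 0.55  (halfway through cooldown: 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) = 0.10  (fully decayed; each group runs at initial_lr * 0.1)
+# Over the same horizon, get_window_size_blocks() grows the attention window
+# from 128 tokens (1 block) to next_multiple_of_n(1728, n=128) = 1792 tokens (14 blocks).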
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:30:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
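+                    # Shorthand used in the help text above: QK = query/key projection weights,
+                    # VO = value and output (c_proj) weights, W_1/W_2 = the MLP c_fc and c_proj
+                    # weights; the corresponding parameter groups are built further below.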
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
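+# Example invocation (a sketch, not taken from this log; assumes an 8-GPU node and a
+# torchrun launch, which supplies the RANK / LOCAL_RANK / WORLD_SIZE variables read above;
+# the script filename is a placeholder):
+#   torchrun --standalone --nproc_per_node=8 train_bios_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+# With these flags the run directory above resolves to
+#   logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42
+# and each optimizer step consumes world_size * train_seq_len = 8 * 12288 = 98304 tokens.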
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
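+        # Example of the split performed below (illustrative only; real items come from
+        # the QA jsonl, and the name here is made up):
+        #   cleaned_text = "What is the birth date of Alice Tanaka? 15 June 1952"
+        #   -> prompt = "What is the birth date of Alice Tanaka?"
+        #   -> answer = "15 June 1952"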
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
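# A worked sketch of the two schedules defined earlier in this script (illustrative
# values only, assuming this run's num_iterations=10000 and cooldown_frac=0.8):
#   get_lr(0) == 1.0, get_lr(2000) == 1.0, get_lr(6000) == 0.55, get_lr(10000) == 0.1
#   (the multiplier stays at 1.0 for the first 20% of steps, then fades linearly to 0.1)
#   get_window_size_blocks(0)     -> 1 block  (128-token window)
#   get_window_size_blocks(5000)  -> 7 blocks (896-token window)
#   get_window_size_blocks(10000) -> 14 blocks (1792-token window)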
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:30:11] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:30:11] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:30:11] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:30:11] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:30:13] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:30:13] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:30:13] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:30:13] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:30:13] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:30:13] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:30:14] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:30:14] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:30:14] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:30:14] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:30:14] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:30:14] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:30:14] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:30:14] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:30:14] [Rank 0] PRINT: Model returns: +[2025-07-05 09:30:14] [Rank 0] PRINT: Model returns: +[2025-07-05 09:30:14] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:30:14] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:30:14] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:30:14] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:30:14] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:30:14] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:30:14] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:30:14] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:30:14] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:30:14] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:30:14] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:30:14] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:30:14] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:30:14] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:30:14] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:30:14] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:31:19] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:31:19] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:31:19] [Rank 0] PRINT: Starting training... +[2025-07-05 09:31:19] [Rank 0] PRINT: Starting training... +[2025-07-05 09:31:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:31:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
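The divisibility warning above follows directly from this run's configuration: with val_seq_len = 65536 and the logged val_batch_size of 262144, the world size must be 4, so only 7 full validation batches fit into the 1,966,080 validation tokens. A small check of that arithmetic (world_size is inferred from the logged batch size, not stated in the config):

val_tokens = 1_966_080                        # hyperparameters.val_tokens
val_seq_len = 65_536                          # hyperparameters.val_seq_len
world_size = 262_144 // val_seq_len           # 4, inferred from the logged batch size
val_batch_size = world_size * val_seq_len     # 262144, as printed in the warning
val_num_steps = val_tokens // val_batch_size              # 7 full validation steps
missed_tokens = val_tokens - val_num_steps * val_batch_size   # 131072 tokens skipped
print(world_size, val_num_steps, missed_tokens)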
+[2025-07-05 09:31:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:31:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:31:28] [Rank 0] step:21/10000 train_time:1754ms step_avg:83.54ms +[2025-07-05 09:31:28] [Rank 0] step:21/10000 train_time:1754ms step_avg:83.54ms +[2025-07-05 09:31:29] [Rank 0] step:41/10000 train_time:3213ms step_avg:78.36ms +[2025-07-05 09:31:29] [Rank 0] step:41/10000 train_time:3213ms step_avg:78.36ms +[2025-07-05 09:31:31] [Rank 0] step:61/10000 train_time:4671ms step_avg:76.57ms +[2025-07-05 09:31:31] [Rank 0] step:61/10000 train_time:4671ms step_avg:76.57ms +[2025-07-05 09:31:32] [Rank 0] step:81/10000 train_time:6132ms step_avg:75.70ms +[2025-07-05 09:31:32] [Rank 0] step:81/10000 train_time:6132ms step_avg:75.70ms +[2025-07-05 09:31:34] [Rank 0] step:101/10000 train_time:8244ms step_avg:81.62ms +[2025-07-05 09:31:34] [Rank 0] step:101/10000 train_time:8244ms step_avg:81.62ms +[2025-07-05 09:31:36] [Rank 0] step:121/10000 train_time:9711ms step_avg:80.26ms +[2025-07-05 09:31:36] [Rank 0] step:121/10000 train_time:9711ms step_avg:80.26ms +[2025-07-05 09:31:37] [Rank 0] step:141/10000 train_time:11179ms step_avg:79.28ms +[2025-07-05 09:31:37] [Rank 0] step:141/10000 train_time:11179ms step_avg:79.28ms +[2025-07-05 09:31:39] [Rank 0] step:161/10000 train_time:12647ms step_avg:78.55ms +[2025-07-05 09:31:39] [Rank 0] step:161/10000 train_time:12647ms step_avg:78.55ms +[2025-07-05 09:31:40] [Rank 0] step:181/10000 train_time:14164ms step_avg:78.26ms +[2025-07-05 09:31:40] [Rank 0] step:181/10000 train_time:14164ms step_avg:78.26ms +[2025-07-05 09:31:42] [Rank 0] step:201/10000 train_time:15824ms step_avg:78.73ms +[2025-07-05 09:31:42] [Rank 0] step:201/10000 train_time:15824ms step_avg:78.73ms +[2025-07-05 09:31:43] [Rank 0] step:221/10000 train_time:17292ms step_avg:78.24ms +[2025-07-05 09:31:43] [Rank 0] step:221/10000 train_time:17292ms step_avg:78.24ms +[2025-07-05 09:31:45] [Rank 0] step:241/10000 train_time:18760ms step_avg:77.84ms +[2025-07-05 09:31:45] [Rank 0] step:241/10000 train_time:18760ms step_avg:77.84ms +[2025-07-05 09:31:46] [Rank 0] step:261/10000 train_time:20230ms step_avg:77.51ms +[2025-07-05 09:31:46] [Rank 0] step:261/10000 train_time:20230ms step_avg:77.51ms +[2025-07-05 09:31:48] [Rank 0] step:281/10000 train_time:21734ms step_avg:77.35ms +[2025-07-05 09:31:48] [Rank 0] step:281/10000 train_time:21734ms step_avg:77.35ms +[2025-07-05 09:31:49] [Rank 0] step:301/10000 train_time:23206ms step_avg:77.10ms +[2025-07-05 09:31:49] [Rank 0] step:301/10000 train_time:23206ms step_avg:77.10ms +[2025-07-05 09:31:51] [Rank 0] step:321/10000 train_time:24676ms step_avg:76.87ms +[2025-07-05 09:31:51] [Rank 0] step:321/10000 train_time:24676ms step_avg:76.87ms +[2025-07-05 09:31:52] [Rank 0] step:341/10000 train_time:26146ms step_avg:76.68ms +[2025-07-05 09:31:52] [Rank 0] step:341/10000 train_time:26146ms step_avg:76.68ms +[2025-07-05 09:31:54] [Rank 0] step:361/10000 train_time:27668ms step_avg:76.64ms +[2025-07-05 09:31:54] [Rank 0] step:361/10000 train_time:27668ms step_avg:76.64ms +[2025-07-05 09:31:55] [Rank 0] step:381/10000 train_time:29321ms step_avg:76.96ms +[2025-07-05 09:31:55] [Rank 0] step:381/10000 train_time:29321ms step_avg:76.96ms +[2025-07-05 09:31:57] [Rank 0] step:401/10000 train_time:30793ms step_avg:76.79ms +[2025-07-05 09:31:57] [Rank 0] step:401/10000 train_time:30793ms step_avg:76.79ms +[2025-07-05 09:31:58] [Rank 0] step:421/10000 train_time:32263ms step_avg:76.64ms 
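Two quick sanity checks on the numbers logged above, using only values that appear in the log: the step-0 validation loss is roughly ln(number of logit classes), as expected for an untrained model (the logits dimension of 50304 is the padded vocabulary size printed in the model test above), and each step_avg is simply the cumulative train_time divided by the printed step index.

import math

print(math.log(50304))   # ~10.8258: uniform-prediction baseline over 50304-way logits,
                         # matching the step-0 val_loss of 10.8258
print(math.log(50257))   # ~10.8249 with the configured vocab_size, for comparison
print(3213 / 41)         # ~78.37 ms, consistent with the 78.36 ms step_avg at step 41
                         # (train_time is shown rounded to whole ms in the log)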
+[2025-07-05 09:31:58] [Rank 0] step:421/10000 train_time:32263ms step_avg:76.64ms +[2025-07-05 09:32:00] [Rank 0] step:441/10000 train_time:34003ms step_avg:77.10ms +[2025-07-05 09:32:00] [Rank 0] step:441/10000 train_time:34003ms step_avg:77.10ms +[2025-07-05 09:32:02] [Rank 0] step:461/10000 train_time:35588ms step_avg:77.20ms +[2025-07-05 09:32:02] [Rank 0] step:461/10000 train_time:35588ms step_avg:77.20ms +[2025-07-05 09:32:03] [Rank 0] step:481/10000 train_time:37063ms step_avg:77.05ms +[2025-07-05 09:32:03] [Rank 0] step:481/10000 train_time:37063ms step_avg:77.05ms +[2025-07-05 09:32:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:32:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:32:06] [Rank 0] PRINT: step:500/10000 train_loss:3.3877 val_loss:1.6147 train_time:38536ms step_avg:77.07ms +[2025-07-05 09:32:06] [Rank 0] PRINT: step:500/10000 train_loss:3.3877 val_loss:1.6147 train_time:38536ms step_avg:77.07ms +[2025-07-05 09:32:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:32:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..d2171e898eb347c0c6d6dc01decfbb976ba49754 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "92b0b921-d018-4327-b592-765d508c26e7", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/training_log_92b0b921-d018-4327-b592-765d508c26e7.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/training_log_92b0b921-d018-4327-b592-765d508c26e7.txt new file mode 100644 index 0000000000000000000000000000000000000000..fef77bbd3054e5d06628bf0e939877b32afbacc8 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/training_log_92b0b921-d018-4327-b592-765d508c26e7.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:57:15] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:57:15 2025 --- +[2025-07-05 09:57:15] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:57:15 2025 --- +[2025-07-05 09:57:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:57:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:57:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:57:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:57:15] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 09:57:15] 
[Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 09:57:15] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46 +[2025-07-05 09:57:15] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46 +[2025-07-05 09:57:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
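+# --- Illustrative sketch (not from the original script): the stable-then-decay
+# --- schedule defined in get_lr() above holds the LR multiplier at 1.0 for the
+# --- first (1 - cooldown_frac) of training, then decays linearly down to 0.1.
+# --- Constants mirror this run's Hyperparameters (num_iterations=10000,
+# --- cooldown_frac=0.8); the sample values are a sanity check only.
+def _sketch_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)   # clamped training progress, as in get_lr
+    if x < 1 - cooldown_frac:
+        return 1.0                                   # stable phase
+    w = (1 - x) / max(cooldown_frac, 1e-9)
+    return w * 1.0 + (1 - w) * 0.1                   # linear decay toward 0.1x
+# e.g. _sketch_lr_multiplier(0) == 1.0, _sketch_lr_multiplier(2000) == 1.0,
+# _sketch_lr_multiplier(6000) == 0.55, _sketch_lr_multiplier(10000) == 0.1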
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:57:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
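+# --- Illustrative sketch (not from the original script): with the hyperparameters
+# --- above, one optimizer step consumes world_size * train_seq_len tokens, and
+# --- distributed_data_generator hands each rank a disjoint train_seq_len-token
+# --- slice of that window. world_size=8 is an assumed example value here.
+_sketch_world_size = 8
+_sketch_local_tokens = 12 * 1024                                      # train_seq_len per rank
+_sketch_tokens_per_step = _sketch_world_size * _sketch_local_tokens   # 98304 tokens per step
+# rank r reads tokens[pos + r*12288 : pos + (r+1)*12288 + 1]; inputs are buf[:-1],
+# targets are buf[1:] (shifted by one), and pos advances by 98304 after each yield.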
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
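# The loader above yields world_size * train_seq_len tokens per optimizer step
# (train_seq_len of them to this rank). This accumulator and train_step_count on
# the next line collect loss_train.detach() / args.train_seq_len and the number of
# steps taken; their ratio is all-reduced across ranks at each validation report and
# both are then reset, so the printed train_loss is the mean since the previous report.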
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:57:16] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:57:16] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:57:18] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:57:18] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:57:18] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:57:19] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 09:57:19] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:57:19] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:57:19] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:57:19] [Rank 0] PRINT: Model returns:
+[2025-07-05 09:57:19] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:57:19] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:57:19] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:57:19] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:57:19] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:57:19] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:57:19] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:57:19] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:58:25] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:58:25] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:58:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
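The warning above is a rounding effect: the validation loop floors the step count. A minimal sketch of the arithmetic, using the values from config.json and the warning message:

val_tokens     = 1_966_080                                    # hyperparameters.val_tokens
val_batch_size = 262_144                                      # world_size * val_seq_len, as reported
val_num_steps  = val_tokens // val_batch_size                 # 7 full validation steps are run
skipped_tokens = val_tokens - val_num_steps * val_batch_size  # 131_072 tokens go unused per eval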
+[2025-07-05 09:58:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:58:34] [Rank 0] step:21/10000 train_time:1758ms step_avg:83.72ms
+[2025-07-05 09:58:36] [Rank 0] step:41/10000 train_time:3218ms step_avg:78.49ms
+[2025-07-05 09:58:37] [Rank 0] step:61/10000 train_time:4682ms step_avg:76.75ms
+[2025-07-05 09:58:39] [Rank 0] step:81/10000 train_time:6148ms step_avg:75.90ms
+[2025-07-05 09:58:40] [Rank 0] step:101/10000 train_time:7859ms step_avg:77.81ms
+[2025-07-05 09:58:42] [Rank 0] step:121/10000 train_time:9329ms step_avg:77.10ms
+[2025-07-05 09:58:43] [Rank 0] step:141/10000 train_time:10797ms step_avg:76.57ms
+[2025-07-05 09:58:45] [Rank 0] step:161/10000 train_time:12269ms step_avg:76.20ms
+[2025-07-05 09:58:46] [Rank 0] step:181/10000 train_time:13740ms step_avg:75.91ms
+[2025-07-05 09:58:48] [Rank 0] step:201/10000 train_time:15444ms step_avg:76.84ms
+[2025-07-05 09:58:49] [Rank 0] step:221/10000 train_time:16916ms step_avg:76.54ms
+[2025-07-05 09:58:51] [Rank 0] step:241/10000 train_time:18386ms step_avg:76.29ms
+[2025-07-05 09:58:52] [Rank 0] step:261/10000 train_time:19857ms step_avg:76.08ms
+[2025-07-05 09:58:54] [Rank 0] step:281/10000 train_time:21565ms step_avg:76.74ms
+[2025-07-05 09:58:56] [Rank 0] step:301/10000 train_time:23036ms step_avg:76.53ms
+[2025-07-05 09:58:57] [Rank 0] step:321/10000 train_time:24509ms step_avg:76.35ms
+[2025-07-05 09:58:58] [Rank 0] step:341/10000 train_time:25982ms step_avg:76.19ms
+[2025-07-05 09:59:01] [Rank 0] step:361/10000 train_time:27707ms step_avg:76.75ms
+[2025-07-05 09:59:02] [Rank 0] step:381/10000 train_time:29565ms step_avg:77.60ms
+[2025-07-05 09:59:04] [Rank 0] step:401/10000 train_time:31036ms step_avg:77.40ms
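For the timing lines above, the learning-rate schedule is still in its constant phase: with num_iterations=10000 and cooldown_frac=0.8, get_lr holds the multiplier at 1.0 for the first 2000 steps and only then decays it linearly toward 0.1. A minimal sketch of that schedule (clamping omitted; lr_multiplier is a stand-in name for get_lr in the logged script):

def lr_multiplier(step, num_iterations=10_000, cooldown_frac=0.8):
    x = step / num_iterations
    if x < 1 - cooldown_frac:    # first 20% of training: constant
        return 1.0
    w = (1 - x) / cooldown_frac  # then decay linearly toward 0.1x
    return w * 1.0 + (1 - w) * 0.1

assert lr_multiplier(401) == 1.0  # the steps logged above all run at the base learning rates

Separately, the training loop ramps the Muon momentum from 0.85 to 0.95 over the first 300 steps, so it has already plateaued by the steps shown here.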
+[2025-07-05 09:59:05] [Rank 0] step:421/10000 train_time:32510ms step_avg:77.22ms +[2025-07-05 09:59:06] [Rank 0] step:441/10000 train_time:33982ms step_avg:77.06ms +[2025-07-05 09:59:06] [Rank 0] step:441/10000 train_time:33982ms step_avg:77.06ms +[2025-07-05 09:59:09] [Rank 0] step:461/10000 train_time:36121ms step_avg:78.35ms +[2025-07-05 09:59:09] [Rank 0] step:461/10000 train_time:36121ms step_avg:78.35ms +[2025-07-05 09:59:10] [Rank 0] step:481/10000 train_time:37592ms step_avg:78.15ms +[2025-07-05 09:59:10] [Rank 0] step:481/10000 train_time:37592ms step_avg:78.15ms +[2025-07-05 09:59:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:59:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:59:12] [Rank 0] PRINT: step:500/10000 train_loss:3.3873 val_loss:1.6174 train_time:39063ms step_avg:78.13ms +[2025-07-05 09:59:12] [Rank 0] PRINT: step:500/10000 train_loss:3.3873 val_loss:1.6174 train_time:39063ms step_avg:78.13ms +[2025-07-05 09:59:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:59:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b2cb82881f0ea6a38596908d655748ef62056381 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "d9bb8cd7-c0fa-4ff6-8772-aa967cab0bf0", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47/training_log_d9bb8cd7-c0fa-4ff6-8772-aa967cab0bf0.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47/training_log_d9bb8cd7-c0fa-4ff6-8772-aa967cab0bf0.txt new file mode 100644 index 0000000000000000000000000000000000000000..0912be9bcf407e746561b8ff1fe115b1085962f1 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47/training_log_d9bb8cd7-c0fa-4ff6-8772-aa967cab0bf0.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:15:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:15:57 2025 --- +[2025-07-05 08:15:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:15:57 2025 --- +[2025-07-05 08:15:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:15:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:15:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:15:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:15:57] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:15:57] 
[Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:15:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47 +[2025-07-05 08:15:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_47 +[2025-07-05 08:15:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
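+# NOTE: with the Hyperparameters above (num_iterations=10000, cooldown_frac=0.8), get_lr(step)
+# holds the multiplier at 1.0 for the first 2000 steps (x < 0.2) and then decays it linearly
+# toward 0.1; a few illustrative values:
+#   step  2000 -> x=0.2 -> w=1.0 -> multiplier 1.00
+#   step  6000 -> x=0.6 -> w=0.5 -> multiplier 0.55
+#   step 10000 -> x=1.0 -> w=0.0 -> multiplier 0.10
+# The training loop below applies this as group["lr"] = group["initial_lr"] * get_lr(step).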
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:15:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
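+# NOTE: the DDP setup above reads RANK / LOCAL_RANK / WORLD_SIZE from the environment, so the
+# script is meant to be started with a torchrun-style launcher. A hypothetical invocation
+# (script filename illustrative only) that reproduces this run's configuration would be:
+#   torchrun --nproc_per_node=<num_gpus> <this_script>.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0001 --seed 42
+# which makes the run directory resolve to .../logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42.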
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
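+# NOTE: generate_powerlaw_selection_counts(m), defined above, gives group g (0..m) a budget of
+# 2**(m-g) samples per class, with 1 class in group 0 and 2**(g-1) classes in group g > 0.
+# For example, m=3 yields:
+#   group 0: 1 class   x 8 samples
+#   group 1: 1 class   x 4 samples
+#   group 2: 2 classes x 2 samples
+#   group 3: 4 classes x 1 sample
+# The detailed evaluation below uses M_FOR_POWERLAW = 11, i.e. 2048 classes in total.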
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:15:58] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:15:58] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:16:00] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:16:00] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:16:00] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:16:00] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 08:16:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:16:00] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:16:00] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:16:01] [Rank 0] PRINT: Model returns:
+[2025-07-05 08:16:01] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:16:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 08:16:01] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 08:16:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 08:16:01] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 08:16:01] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:16:01] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:16:01] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:17:39] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:17:39] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:17:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:17:47] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:17:48] [Rank 0] step:21/10000 train_time:1756ms step_avg:83.63ms
+[2025-07-05 08:17:50] [Rank 0] step:41/10000 train_time:3213ms step_avg:78.36ms
+[2025-07-05 08:17:51] [Rank 0] step:61/10000 train_time:4674ms step_avg:76.62ms
+[2025-07-05 08:17:53] [Rank 0] step:81/10000 train_time:6135ms step_avg:75.74ms
+[2025-07-05 08:17:55] [Rank 0] step:101/10000 train_time:8287ms step_avg:82.05ms
+[2025-07-05 08:17:56] [Rank 0] step:121/10000 train_time:9753ms step_avg:80.60ms
+[2025-07-05 08:17:58] [Rank 0] step:141/10000 train_time:11322ms step_avg:80.30ms
+[2025-07-05 08:18:00] [Rank 0] step:161/10000 train_time:12792ms step_avg:79.45ms
+[2025-07-05 08:18:02] [Rank 0] step:181/10000 train_time:14596ms step_avg:80.64ms
+[2025-07-05 08:18:03] [Rank 0] step:201/10000 train_time:16650ms step_avg:82.84ms
+[2025-07-05 08:18:05] [Rank 0] step:221/10000 train_time:18122ms step_avg:82.00ms
+[2025-07-05 08:18:06] [Rank 0] step:241/10000 train_time:19589ms step_avg:81.28ms
+[2025-07-05 08:18:08] [Rank 0] step:261/10000 train_time:21059ms step_avg:80.68ms
+[2025-07-05 08:18:10] [Rank 0] step:281/10000 train_time:23178ms step_avg:82.48ms
+[2025-07-05 08:18:11] [Rank 0] step:301/10000 train_time:24648ms step_avg:81.89ms
+[2025-07-05 08:18:13] [Rank 0] step:321/10000 train_time:26115ms step_avg:81.35ms
+[2025-07-05 08:18:14] [Rank 0] step:341/10000 train_time:27584ms step_avg:80.89ms
+[2025-07-05 08:18:16] [Rank 0] step:361/10000 train_time:29056ms step_avg:80.49ms
+[2025-07-05 08:18:18] [Rank 0] step:381/10000 train_time:31178ms step_avg:81.83ms
+[2025-07-05 08:18:19] [Rank 0] step:401/10000 train_time:32645ms step_avg:81.41ms
+[2025-07-05 08:18:21] [Rank 0] step:421/10000 train_time:34113ms step_avg:81.03ms +[2025-07-05 08:18:22] [Rank 0] step:441/10000 train_time:35582ms step_avg:80.69ms +[2025-07-05 08:18:22] [Rank 0] step:441/10000 train_time:35582ms step_avg:80.69ms +[2025-07-05 08:18:24] [Rank 0] step:461/10000 train_time:37706ms step_avg:81.79ms +[2025-07-05 08:18:24] [Rank 0] step:461/10000 train_time:37706ms step_avg:81.79ms +[2025-07-05 08:18:26] [Rank 0] step:481/10000 train_time:39174ms step_avg:81.44ms +[2025-07-05 08:18:26] [Rank 0] step:481/10000 train_time:39174ms step_avg:81.44ms +[2025-07-05 08:18:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:18:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:18:28] [Rank 0] PRINT: step:500/10000 train_loss:3.3874 val_loss:1.6159 train_time:40645ms step_avg:81.29ms +[2025-07-05 08:18:28] [Rank 0] PRINT: step:500/10000 train_loss:3.3874 val_loss:1.6159 train_time:40645ms step_avg:81.29ms +[2025-07-05 08:18:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:18:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..feeacc26867d9e1cb2765d5e01802dbeb532ea96 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "6d0aab6e-ab7d-4cc9-acf7-875eacb855f3", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/training_log_6d0aab6e-ab7d-4cc9-acf7-875eacb855f3.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/training_log_6d0aab6e-ab7d-4cc9-acf7-875eacb855f3.txt new file mode 100644 index 0000000000000000000000000000000000000000..d5303150a6f4c2cbfa4a4801b2323c407e6edaaa --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/training_log_6d0aab6e-ab7d-4cc9-acf7-875eacb855f3.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:44:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:44:03 2025 --- +[2025-07-05 08:44:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:44:03 2025 --- +[2025-07-05 08:44:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:44:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:44:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:44:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:44:03] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:44:03] 
[Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:44:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48 +[2025-07-05 08:44:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48 +[2025-07-05 08:44:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
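+# Note: train_loss_sum accumulates the detached per-step training loss
+# (scaled by 1/args.train_seq_len) and train_step_count counts optimizer steps
+# since the last validation log; their ratio is all_reduce-averaged across
+# ranks and both buffers are reset to zero at every validation checkpoint below.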
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:44:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
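+# Note: the absolute run_dir_path_str above is only a default; on the master
+# process it is rebuilt below from base_log_dir and the mode/parameterization/
+# lr/seed settings, and config.json plus the training log are written there.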
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
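+# Note: distributed_data_generator is called with a global batch of
+# world_size * args.train_seq_len tokens, so each rank consumes
+# args.train_seq_len tokens per step; gradients are averaged manually via
+# dist.all_reduce(..., ReduceOp.AVG) in the training loop rather than by a DDP wrapper.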
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:44:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:44:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:44:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:44:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:44:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:44:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:44:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:44:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:44:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:44:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:44:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:44:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:44:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:44:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:44:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:44:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:44:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:44:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:44:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:44:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:44:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:44:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:44:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:44:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:44:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:44:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:44:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:44:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:44:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:44:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:44:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:44:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:44:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:44:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:44:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:44:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:45:11] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:45:11] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:45:11] [Rank 0] PRINT: Starting training... +[2025-07-05 08:45:11] [Rank 0] PRINT: Starting training... +[2025-07-05 08:45:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:45:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:45:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:45:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:45:20] [Rank 0] step:21/10000 train_time:1549ms step_avg:73.78ms +[2025-07-05 08:45:20] [Rank 0] step:21/10000 train_time:1549ms step_avg:73.78ms +[2025-07-05 08:45:22] [Rank 0] step:41/10000 train_time:3002ms step_avg:73.22ms +[2025-07-05 08:45:22] [Rank 0] step:41/10000 train_time:3002ms step_avg:73.22ms +[2025-07-05 08:45:23] [Rank 0] step:61/10000 train_time:4457ms step_avg:73.07ms +[2025-07-05 08:45:23] [Rank 0] step:61/10000 train_time:4457ms step_avg:73.07ms +[2025-07-05 08:45:25] [Rank 0] step:81/10000 train_time:6045ms step_avg:74.63ms +[2025-07-05 08:45:25] [Rank 0] step:81/10000 train_time:6045ms step_avg:74.63ms +[2025-07-05 08:45:27] [Rank 0] step:101/10000 train_time:8257ms step_avg:81.75ms +[2025-07-05 08:45:27] [Rank 0] step:101/10000 train_time:8257ms step_avg:81.75ms +[2025-07-05 08:45:29] [Rank 0] step:121/10000 train_time:9716ms step_avg:80.30ms +[2025-07-05 08:45:29] [Rank 0] step:121/10000 train_time:9716ms step_avg:80.30ms +[2025-07-05 08:45:30] [Rank 0] step:141/10000 train_time:11177ms step_avg:79.27ms +[2025-07-05 08:45:30] [Rank 0] step:141/10000 train_time:11177ms step_avg:79.27ms +[2025-07-05 08:45:31] [Rank 0] step:161/10000 train_time:12640ms step_avg:78.51ms +[2025-07-05 08:45:31] [Rank 0] step:161/10000 train_time:12640ms step_avg:78.51ms +[2025-07-05 08:45:33] [Rank 0] step:181/10000 train_time:14154ms step_avg:78.20ms +[2025-07-05 08:45:33] [Rank 0] step:181/10000 train_time:14154ms step_avg:78.20ms +[2025-07-05 08:45:35] [Rank 0] step:201/10000 train_time:15799ms step_avg:78.60ms +[2025-07-05 08:45:35] [Rank 0] step:201/10000 train_time:15799ms step_avg:78.60ms +[2025-07-05 08:45:36] [Rank 0] step:221/10000 train_time:17264ms step_avg:78.12ms +[2025-07-05 08:45:36] [Rank 0] step:221/10000 train_time:17264ms step_avg:78.12ms +[2025-07-05 08:45:38] [Rank 0] step:241/10000 train_time:18727ms step_avg:77.71ms +[2025-07-05 08:45:38] [Rank 0] step:241/10000 train_time:18727ms step_avg:77.71ms +[2025-07-05 08:45:39] [Rank 0] step:261/10000 train_time:20191ms step_avg:77.36ms +[2025-07-05 08:45:39] [Rank 0] step:261/10000 train_time:20191ms step_avg:77.36ms +[2025-07-05 08:45:41] [Rank 0] step:281/10000 train_time:22301ms step_avg:79.36ms +[2025-07-05 08:45:41] [Rank 0] step:281/10000 train_time:22301ms step_avg:79.36ms +[2025-07-05 08:45:43] [Rank 0] step:301/10000 train_time:23766ms step_avg:78.96ms +[2025-07-05 08:45:43] [Rank 0] step:301/10000 train_time:23766ms step_avg:78.96ms +[2025-07-05 08:45:44] [Rank 0] step:321/10000 train_time:25230ms step_avg:78.60ms +[2025-07-05 08:45:44] [Rank 0] step:321/10000 train_time:25230ms step_avg:78.60ms +[2025-07-05 08:45:46] [Rank 0] step:341/10000 train_time:26696ms step_avg:78.29ms +[2025-07-05 08:45:46] [Rank 0] step:341/10000 train_time:26696ms step_avg:78.29ms +[2025-07-05 08:45:47] [Rank 0] step:361/10000 train_time:28414ms step_avg:78.71ms +[2025-07-05 08:45:47] [Rank 0] step:361/10000 train_time:28414ms step_avg:78.71ms +[2025-07-05 08:45:49] [Rank 0] step:381/10000 train_time:29858ms step_avg:78.37ms +[2025-07-05 08:45:49] [Rank 0] step:381/10000 train_time:29858ms step_avg:78.37ms +[2025-07-05 08:45:50] [Rank 0] step:401/10000 train_time:31321ms step_avg:78.11ms +[2025-07-05 08:45:50] [Rank 0] step:401/10000 train_time:31321ms step_avg:78.11ms +[2025-07-05 08:45:52] [Rank 0] step:421/10000 train_time:32788ms step_avg:77.88ms 
+[2025-07-05 08:45:52] [Rank 0] step:421/10000 train_time:32788ms step_avg:77.88ms +[2025-07-05 08:45:53] [Rank 0] step:441/10000 train_time:34252ms step_avg:77.67ms +[2025-07-05 08:45:53] [Rank 0] step:441/10000 train_time:34252ms step_avg:77.67ms +[2025-07-05 08:45:55] [Rank 0] step:461/10000 train_time:36382ms step_avg:78.92ms +[2025-07-05 08:45:55] [Rank 0] step:461/10000 train_time:36382ms step_avg:78.92ms +[2025-07-05 08:45:57] [Rank 0] step:481/10000 train_time:37847ms step_avg:78.68ms +[2025-07-05 08:45:57] [Rank 0] step:481/10000 train_time:37847ms step_avg:78.68ms +[2025-07-05 08:45:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:45:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:45:59] [Rank 0] PRINT: step:500/10000 train_loss:3.3886 val_loss:1.6151 train_time:39314ms step_avg:78.63ms +[2025-07-05 08:45:59] [Rank 0] PRINT: step:500/10000 train_loss:3.3886 val_loss:1.6151 train_time:39314ms step_avg:78.63ms +[2025-07-05 08:45:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:45:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..aa9c1eac6535cdceb4ebdb09150117be6b344285 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "431ff50e-b6f1-45ad-a508-5b9e3a14c3e9", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/training_log_431ff50e-b6f1-45ad-a508-5b9e3a14c3e9.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/training_log_431ff50e-b6f1-45ad-a508-5b9e3a14c3e9.txt new file mode 100644 index 0000000000000000000000000000000000000000..ceca1c89ffcbee70ead038c03b296b870c5706af --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/training_log_431ff50e-b6f1-45ad-a508-5b9e3a14c3e9.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:11:30] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:11:30 2025 --- +[2025-07-05 09:11:30] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:11:30 2025 --- +[2025-07-05 09:11:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:11:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:11:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:11:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:11:30] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:11:30] 
[Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:11:30] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49 +[2025-07-05 09:11:30] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49 +[2025-07-05 09:11:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
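+# --- Illustrative sketch (not part of the logged script): the validation batching
+# arithmetic used in the loop below, assuming the same world size of 4 implied by
+# the first run's logged val_batch_size of 262144 (4 * val_seq_len of 65536). The
+# 1,966,080 validation tokens do not divide evenly, which is what triggers the
+# "not perfectly divisible" warning seen in the training logs:
+#
+#     val_batch_size = 4 * 65536             # 262144 tokens per validation step
+#     val_num_steps  = 1966080 // 262144     # -> 7 full steps (7.5 exact)
+#     skipped        = 1966080 - 7 * 262144  # -> 131072 tokens left unevaluated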
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:11:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
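+# The RANK / LOCAL_RANK / WORLD_SIZE environment variables read above are the
+# ones a launcher such as torchrun populates; without a launcher they fall back
+# to rank 0, local rank 0, world size 1. As a minimal launch sketch (the script
+# filename and GPU count are assumptions for illustration, not taken from this
+# run), a job matching the saved config.json could be started with:
+#   torchrun --standalone --nproc_per_node=8 train_bios_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0001 --seed 42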
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
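+# Illustrative values for the attention-window schedule implemented by
+# get_window_size_blocks above (a minimal sketch, assuming the configured
+# num_iterations=10000): the sliding window grows in 128-token blocks from
+# 128 tokens at step 0 to 1792 tokens at the final step, e.g.
+#   step 0     -> 128 tokens  -> 1 block
+#   step 2500  -> 512 tokens  -> 4 blocks
+#   step 5000  -> 896 tokens  -> 7 blocks
+#   step 10000 -> 1792 tokens -> 14 blocks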
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:11:31] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:11:31] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:11:31] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:11:31] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:11:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:11:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:11:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:11:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:11:33] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:11:33] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:11:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:11:34] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:11:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:11:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:11:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:11:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:11:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:11:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:11:34] [Rank 0] PRINT: Model returns: +[2025-07-05 09:11:34] [Rank 0] PRINT: Model returns: +[2025-07-05 09:11:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:11:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:11:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:11:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:11:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:11:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:11:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:11:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:11:34] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:11:34] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:11:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:11:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:11:34] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:11:34] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:11:34] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:11:34] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:12:43] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:12:43] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:12:43] [Rank 0] PRINT: Starting training... +[2025-07-05 09:12:43] [Rank 0] PRINT: Starting training... +[2025-07-05 09:12:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:12:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
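The divisibility warning above follows directly from the logged hyperparameters; a minimal sketch of the arithmetic, assuming a world size of 4 ranks (inferred from val_batch_size = 262144 together with val_seq_len = 65536):

val_tokens     = 1966080
val_seq_len    = 65536
world_size     = 4                                 # assumption inferred from the printed batch size
val_batch_size = world_size * val_seq_len          # 262144, as printed in the warning
val_num_steps  = val_tokens // val_batch_size      # 7 full validation steps per evaluation
tokens_missed  = val_tokens - val_num_steps * val_batch_size
print(val_batch_size, val_num_steps, tokens_missed)  # 262144 7 131072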
+[2025-07-05 09:12:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:12:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:12:52] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.25ms +[2025-07-05 09:12:52] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.25ms +[2025-07-05 09:12:53] [Rank 0] step:41/10000 train_time:3203ms step_avg:78.13ms +[2025-07-05 09:12:53] [Rank 0] step:41/10000 train_time:3203ms step_avg:78.13ms +[2025-07-05 09:12:55] [Rank 0] step:61/10000 train_time:4657ms step_avg:76.35ms +[2025-07-05 09:12:55] [Rank 0] step:61/10000 train_time:4657ms step_avg:76.35ms +[2025-07-05 09:12:56] [Rank 0] step:81/10000 train_time:6115ms step_avg:75.49ms +[2025-07-05 09:12:56] [Rank 0] step:81/10000 train_time:6115ms step_avg:75.49ms +[2025-07-05 09:12:58] [Rank 0] step:101/10000 train_time:8240ms step_avg:81.59ms +[2025-07-05 09:12:58] [Rank 0] step:101/10000 train_time:8240ms step_avg:81.59ms +[2025-07-05 09:13:00] [Rank 0] step:121/10000 train_time:9705ms step_avg:80.20ms +[2025-07-05 09:13:00] [Rank 0] step:121/10000 train_time:9705ms step_avg:80.20ms +[2025-07-05 09:13:01] [Rank 0] step:141/10000 train_time:11166ms step_avg:79.19ms +[2025-07-05 09:13:01] [Rank 0] step:141/10000 train_time:11166ms step_avg:79.19ms +[2025-07-05 09:13:03] [Rank 0] step:161/10000 train_time:12630ms step_avg:78.45ms +[2025-07-05 09:13:03] [Rank 0] step:161/10000 train_time:12630ms step_avg:78.45ms +[2025-07-05 09:13:05] [Rank 0] step:181/10000 train_time:14148ms step_avg:78.17ms +[2025-07-05 09:13:05] [Rank 0] step:181/10000 train_time:14148ms step_avg:78.17ms +[2025-07-05 09:13:06] [Rank 0] step:201/10000 train_time:16205ms step_avg:80.62ms +[2025-07-05 09:13:06] [Rank 0] step:201/10000 train_time:16205ms step_avg:80.62ms +[2025-07-05 09:13:08] [Rank 0] step:221/10000 train_time:17668ms step_avg:79.95ms +[2025-07-05 09:13:08] [Rank 0] step:221/10000 train_time:17668ms step_avg:79.95ms +[2025-07-05 09:13:09] [Rank 0] step:241/10000 train_time:19133ms step_avg:79.39ms +[2025-07-05 09:13:09] [Rank 0] step:241/10000 train_time:19133ms step_avg:79.39ms +[2025-07-05 09:13:11] [Rank 0] step:261/10000 train_time:20603ms step_avg:78.94ms +[2025-07-05 09:13:11] [Rank 0] step:261/10000 train_time:20603ms step_avg:78.94ms +[2025-07-05 09:13:13] [Rank 0] step:281/10000 train_time:22732ms step_avg:80.90ms +[2025-07-05 09:13:13] [Rank 0] step:281/10000 train_time:22732ms step_avg:80.90ms +[2025-07-05 09:13:14] [Rank 0] step:301/10000 train_time:24196ms step_avg:80.39ms +[2025-07-05 09:13:14] [Rank 0] step:301/10000 train_time:24196ms step_avg:80.39ms +[2025-07-05 09:13:16] [Rank 0] step:321/10000 train_time:25661ms step_avg:79.94ms +[2025-07-05 09:13:16] [Rank 0] step:321/10000 train_time:25661ms step_avg:79.94ms +[2025-07-05 09:13:17] [Rank 0] step:341/10000 train_time:27128ms step_avg:79.55ms +[2025-07-05 09:13:17] [Rank 0] step:341/10000 train_time:27128ms step_avg:79.55ms +[2025-07-05 09:13:19] [Rank 0] step:361/10000 train_time:29255ms step_avg:81.04ms +[2025-07-05 09:13:19] [Rank 0] step:361/10000 train_time:29255ms step_avg:81.04ms +[2025-07-05 09:13:21] [Rank 0] step:381/10000 train_time:30700ms step_avg:80.58ms +[2025-07-05 09:13:21] [Rank 0] step:381/10000 train_time:30700ms step_avg:80.58ms +[2025-07-05 09:13:22] [Rank 0] step:401/10000 train_time:32165ms step_avg:80.21ms +[2025-07-05 09:13:22] [Rank 0] step:401/10000 train_time:32165ms step_avg:80.21ms +[2025-07-05 09:13:24] [Rank 0] step:421/10000 train_time:33631ms step_avg:79.88ms 
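The step_avg figures in these timing lines are just the printed train_time divided by the printed step index; a quick check against a few of the values above (train_time is printed rounded to whole milliseconds, so the last digit can differ slightly):

for logged_step, train_time_ms in [(21, 1748), (41, 3203), (241, 19133), (421, 33631)]:
    print(logged_step, round(train_time_ms / logged_step, 2))
# -> 21: 83.24, 41: 78.12, 241: 79.39, 421: 79.88  (log shows 83.25, 78.13, 79.39, 79.88)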
+[2025-07-05 09:13:24] [Rank 0] step:421/10000 train_time:33631ms step_avg:79.88ms +[2025-07-05 09:13:25] [Rank 0] step:441/10000 train_time:35097ms step_avg:79.58ms +[2025-07-05 09:13:25] [Rank 0] step:441/10000 train_time:35097ms step_avg:79.58ms +[2025-07-05 09:13:27] [Rank 0] step:461/10000 train_time:37229ms step_avg:80.76ms +[2025-07-05 09:13:27] [Rank 0] step:461/10000 train_time:37229ms step_avg:80.76ms +[2025-07-05 09:13:29] [Rank 0] step:481/10000 train_time:38694ms step_avg:80.44ms +[2025-07-05 09:13:29] [Rank 0] step:481/10000 train_time:38694ms step_avg:80.44ms +[2025-07-05 09:13:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:13:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:13:31] [Rank 0] PRINT: step:500/10000 train_loss:3.3897 val_loss:1.6161 train_time:40159ms step_avg:80.32ms +[2025-07-05 09:13:31] [Rank 0] PRINT: step:500/10000 train_loss:3.3897 val_loss:1.6161 train_time:40159ms step_avg:80.32ms +[2025-07-05 09:13:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:13:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e571d5d843c0bd554b289408b8e4cd47c1af424d --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "d9878f92-d5b8-43a0-bfd5-aa1ed9796855", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50/training_log_d9878f92-d5b8-43a0-bfd5-aa1ed9796855.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50/training_log_d9878f92-d5b8-43a0-bfd5-aa1ed9796855.txt new file mode 100644 index 0000000000000000000000000000000000000000..95c1e470e7c70fb7e740f26c64c79023dda3028b --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50/training_log_d9878f92-d5b8-43a0-bfd5-aa1ed9796855.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:39:00] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:39:00 2025 --- +[2025-07-05 09:39:00] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:39:00 2025 --- +[2025-07-05 09:39:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:39:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:39:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:39:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:39:00] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:39:00] 
[Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:39:00] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50 +[2025-07-05 09:39:00] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_50 +[2025-07-05 09:39:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
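+# Illustrative sketch only (not part of the logged run): how the attention window
+# schedule defined above grows over training, assuming num_iterations = 10000.
+# get_window_size_blocks(step) returns window_size // 128, where
+# window_size = max(128, next_multiple_of_n(1728 * step / num_iterations, n=128)).
+# for step in (0, 2500, 5000, 7500, 10000):
+#     x = step / 10000
+#     window = max(128, next_multiple_of_n(1728 * x, n=128))
+#     print(step, window, window // 128)
+# -> (0, 128, 1), (2500, 512, 4), (5000, 896, 7), (7500, 1408, 11), (10000, 1792, 14)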
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:39:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
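# [Editor's note] Illustrative arithmetic, not part of the logged script: with
# train_seq_len = 12*1024 = 12288 from the Hyperparameters above, and a world size of 4
# (inferred from the logged val_batch_size 262144 / val_seq_len 65536 -- an assumption,
# not stated explicitly here), each optimizer step consumes:
assumed_world_size = 4                      # inferred, see note above
tokens_per_step = assumed_world_size * 12 * 1024
print(tokens_per_step)                      # 49152 tokens per step
print(tokens_per_step * 10000)              # ~4.9e8 tokens over num_iterations = 10000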
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
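# [Editor's note] Illustrative sketch, not part of the logged script: the attention window
# schedule above grows the FlexAttention sliding window from 128 tokens (1 block) at step 0
# to 1792 tokens (14 blocks) at step 10000, by rounding 1728 * progress up to the next
# multiple of 128. A self-contained equivalent:
import math

def window_blocks_sketch(step: int, num_iterations: int = 10000) -> int:
    x = min(max(step / num_iterations, 0.0), 1.0)             # training progress in [0, 1]
    window_size = max(128, math.ceil(1728 * x / 128) * 128)   # round the target up to a 128 multiple
    return window_size // 128                                 # number of 128-token blocks
# Example values: window_blocks_sketch(0) == 1, window_blocks_sketch(5000) == 7,
# window_blocks_sketch(10000) == 14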
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:39:01] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:39:01] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:39:01] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:39:01] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:39:03] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:39:03] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:39:03] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:39:03] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:39:03] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:39:03] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:39:04] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:39:04] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:39:04] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:39:04] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:39:04] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:39:04] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:39:04] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:39:04] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:39:04] [Rank 0] PRINT: Model returns: +[2025-07-05 09:39:04] [Rank 0] PRINT: Model returns: +[2025-07-05 09:39:04] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:39:04] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:39:04] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:39:04] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:39:04] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:39:04] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:39:04] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:39:04] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:39:04] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:39:04] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:39:04] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:39:04] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:39:04] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:39:04] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:39:04] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:39:04] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:40:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:40:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:40:09] [Rank 0] PRINT: Starting training... +[2025-07-05 09:40:09] [Rank 0] PRINT: Starting training... +[2025-07-05 09:40:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:40:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
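# Illustrative arithmetic for the warning above (val_seq_len = 65536 comes from the config;
# a world_size of 4 is inferred from the val_batch_size printed in the message):
#   val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144
#   1966080 / 262144 = 7.5  -> only 7 full validation batches are run per evaluation
#   1966080 - 7 * 262144 = 131072 tokens are skipped each validation pass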
+[2025-07-05 09:40:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:40:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:40:18] [Rank 0] step:21/10000 train_time:1749ms step_avg:83.30ms +[2025-07-05 09:40:18] [Rank 0] step:21/10000 train_time:1749ms step_avg:83.30ms +[2025-07-05 09:40:20] [Rank 0] step:41/10000 train_time:3204ms step_avg:78.15ms +[2025-07-05 09:40:20] [Rank 0] step:41/10000 train_time:3204ms step_avg:78.15ms +[2025-07-05 09:40:21] [Rank 0] step:61/10000 train_time:4661ms step_avg:76.40ms +[2025-07-05 09:40:21] [Rank 0] step:61/10000 train_time:4661ms step_avg:76.40ms +[2025-07-05 09:40:23] [Rank 0] step:81/10000 train_time:6120ms step_avg:75.56ms +[2025-07-05 09:40:23] [Rank 0] step:81/10000 train_time:6120ms step_avg:75.56ms +[2025-07-05 09:40:24] [Rank 0] step:101/10000 train_time:7823ms step_avg:77.46ms +[2025-07-05 09:40:24] [Rank 0] step:101/10000 train_time:7823ms step_avg:77.46ms +[2025-07-05 09:40:26] [Rank 0] step:121/10000 train_time:9286ms step_avg:76.74ms +[2025-07-05 09:40:26] [Rank 0] step:121/10000 train_time:9286ms step_avg:76.74ms +[2025-07-05 09:40:27] [Rank 0] step:141/10000 train_time:10748ms step_avg:76.23ms +[2025-07-05 09:40:27] [Rank 0] step:141/10000 train_time:10748ms step_avg:76.23ms +[2025-07-05 09:40:29] [Rank 0] step:161/10000 train_time:12213ms step_avg:75.86ms +[2025-07-05 09:40:29] [Rank 0] step:161/10000 train_time:12213ms step_avg:75.86ms +[2025-07-05 09:40:31] [Rank 0] step:181/10000 train_time:14372ms step_avg:79.40ms +[2025-07-05 09:40:31] [Rank 0] step:181/10000 train_time:14372ms step_avg:79.40ms +[2025-07-05 09:40:32] [Rank 0] step:201/10000 train_time:15813ms step_avg:78.67ms +[2025-07-05 09:40:32] [Rank 0] step:201/10000 train_time:15813ms step_avg:78.67ms +[2025-07-05 09:40:34] [Rank 0] step:221/10000 train_time:17276ms step_avg:78.17ms +[2025-07-05 09:40:34] [Rank 0] step:221/10000 train_time:17276ms step_avg:78.17ms +[2025-07-05 09:40:35] [Rank 0] step:241/10000 train_time:18742ms step_avg:77.77ms +[2025-07-05 09:40:35] [Rank 0] step:241/10000 train_time:18742ms step_avg:77.77ms +[2025-07-05 09:40:37] [Rank 0] step:261/10000 train_time:20208ms step_avg:77.43ms +[2025-07-05 09:40:37] [Rank 0] step:261/10000 train_time:20208ms step_avg:77.43ms +[2025-07-05 09:40:39] [Rank 0] step:281/10000 train_time:22340ms step_avg:79.50ms +[2025-07-05 09:40:39] [Rank 0] step:281/10000 train_time:22340ms step_avg:79.50ms +[2025-07-05 09:40:40] [Rank 0] step:301/10000 train_time:23805ms step_avg:79.09ms +[2025-07-05 09:40:40] [Rank 0] step:301/10000 train_time:23805ms step_avg:79.09ms +[2025-07-05 09:40:42] [Rank 0] step:321/10000 train_time:25272ms step_avg:78.73ms +[2025-07-05 09:40:42] [Rank 0] step:321/10000 train_time:25272ms step_avg:78.73ms +[2025-07-05 09:40:43] [Rank 0] step:341/10000 train_time:26741ms step_avg:78.42ms +[2025-07-05 09:40:43] [Rank 0] step:341/10000 train_time:26741ms step_avg:78.42ms +[2025-07-05 09:40:45] [Rank 0] step:361/10000 train_time:28462ms step_avg:78.84ms +[2025-07-05 09:40:45] [Rank 0] step:361/10000 train_time:28462ms step_avg:78.84ms +[2025-07-05 09:40:47] [Rank 0] step:381/10000 train_time:30339ms step_avg:79.63ms +[2025-07-05 09:40:47] [Rank 0] step:381/10000 train_time:30339ms step_avg:79.63ms +[2025-07-05 09:40:48] [Rank 0] step:401/10000 train_time:31806ms step_avg:79.32ms +[2025-07-05 09:40:48] [Rank 0] step:401/10000 train_time:31806ms step_avg:79.32ms +[2025-07-05 09:40:50] [Rank 0] step:421/10000 train_time:33274ms step_avg:79.03ms 
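# Illustrative note: "step_avg" in the log lines above is the cumulative train_time divided
# by the number of steps completed so far, e.g. roughly 33274 ms / 421 steps ~ 79 ms for the
# last line shown.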
+[2025-07-05 09:40:50] [Rank 0] step:421/10000 train_time:33274ms step_avg:79.03ms +[2025-07-05 09:40:51] [Rank 0] step:441/10000 train_time:34741ms step_avg:78.78ms +[2025-07-05 09:40:51] [Rank 0] step:441/10000 train_time:34741ms step_avg:78.78ms +[2025-07-05 09:40:53] [Rank 0] step:461/10000 train_time:36868ms step_avg:79.97ms +[2025-07-05 09:40:53] [Rank 0] step:461/10000 train_time:36868ms step_avg:79.97ms +[2025-07-05 09:40:55] [Rank 0] step:481/10000 train_time:38337ms step_avg:79.70ms +[2025-07-05 09:40:55] [Rank 0] step:481/10000 train_time:38337ms step_avg:79.70ms +[2025-07-05 09:40:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:40:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:40:57] [Rank 0] PRINT: step:500/10000 train_loss:3.3890 val_loss:1.6144 train_time:39803ms step_avg:79.61ms +[2025-07-05 09:40:57] [Rank 0] PRINT: step:500/10000 train_loss:3.3890 val_loss:1.6144 train_time:39803ms step_avg:79.61ms +[2025-07-05 09:40:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:40:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..edc7789b03f546d9c28f77366d8f1d25c2332f89 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "6515139d-da01-4cd3-98f4-11529a2776dd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51/training_log_6515139d-da01-4cd3-98f4-11529a2776dd.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51/training_log_6515139d-da01-4cd3-98f4-11529a2776dd.txt new file mode 100644 index 0000000000000000000000000000000000000000..294f072bd1ca82c94485678cbfe8ca07d1f710e1 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51/training_log_6515139d-da01-4cd3-98f4-11529a2776dd.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:06:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:06:18 2025 --- +[2025-07-05 10:06:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:06:18 2025 --- +[2025-07-05 10:06:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 10:06:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 10:06:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:06:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:06:18] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:06:18] 
[Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:06:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51 +[2025-07-05 10:06:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_51 +[2025-07-05 10:06:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
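# Illustrative sketch of the schedules defined in the logged script, using its own
# hyperparameters (num_iterations = 10000, cooldown_frac = 0.8). get_lr(step) returns a
# multiplier applied to each param group's initial_lr: flat at 1.0x for the first ~2000
# steps, then a linear decay towards 0.1x:
#   step     0 -> x = 0.0 -> 1.0
#   step  2000 -> x = 0.2 -> 1.0
#   step  6000 -> x = 0.6 -> w = (1 - 0.6) / 0.8 = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1 = 0.55
#   step 10000 -> x = 1.0 -> 0.1
# get_window_size_blocks(step) grows the attention window with training progress, from
# max(128, ...) = 128 tokens (1 block of 128) at step 0 to 1792 tokens (14 blocks) at the
# final step, since 1728 rounds up to the next multiple of 128.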
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:06:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
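# Illustrative aside (hypothetical names, not part of the logged script): a minimal
# sketch of how the run folder names and config.json files appearing in this diff are
# derived from the parsed CLI args; the Namespace below stands in for exp_args.
import json
from argparse import Namespace
from pathlib import Path

cli = Namespace(unet=False, seed=42, optimizer_mode=0,
                model_parameterization="qkvo", adam_lr=1e-4)

# Folder pattern mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed},
# e.g. "mode_0_param_qkvo_lr_0.0001_seed_42" as in the log paths above.
run_folder = (f"mode_{cli.optimizer_mode}_param_{cli.model_parameterization}"
              f"_lr_{cli.adam_lr}_seed_{cli.seed}")
run_dir = Path("logs_bios/qa_0704") / run_folder

# config.json records the CLI args (it also stores the Hyperparameters class
# attributes, omitted here for brevity).
print(json.dumps({"cli_args": vars(cli)}, indent=4))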
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
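# Illustrative sketch (hypothetical helper name, not part of the logged script) of the
# stable-then-decay schedule implemented by get_lr above, evaluated with this run's
# settings: num_iterations=10000, cooldown_frac=0.8.
def get_lr_sketch(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:           # first 20% of training: full learning rate
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1      # linear ramp from 1.0 down to 0.1 of the initial LR

# step 0 and 2000 -> 1.0, step 5000 -> 0.6625, step 10000 -> 0.1
print([get_lr_sketch(s) for s in (0, 2000, 5000, 10000)])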
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:06:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:06:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:06:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:06:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:06:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:06:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:06:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:06:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:06:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:06:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:06:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:06:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:06:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:06:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:06:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:06:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:06:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:06:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:06:22] [Rank 0] PRINT: Model returns: +[2025-07-05 10:06:22] [Rank 0] PRINT: Model returns: +[2025-07-05 10:06:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:06:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:06:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:06:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:06:22] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:06:22] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:06:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:06:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:06:22] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:06:22] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:06:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:06:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:06:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:06:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:06:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:06:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:07:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:07:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:07:27] [Rank 0] PRINT: Starting training... +[2025-07-05 10:07:27] [Rank 0] PRINT: Starting training... +[2025-07-05 10:07:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:07:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
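The divisibility warning logged above follows directly from this run's settings; a short recomputation for reference (world_size = 4 is an assumption, implied by the logged val_batch_size of 262144 together with val_seq_len = 65536):

val_tokens = 1_966_080                            # from config.json
val_seq_len = 65_536                              # from config.json
world_size = 4                                    # assumed; consistent with the logged batch size
val_batch_size = world_size * val_seq_len         # 262144, as logged
val_num_steps = val_tokens // val_batch_size      # 7 full validation steps
leftover = val_tokens - val_num_steps * val_batch_size   # 131072 tokens skipped per eval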
+[2025-07-05 10:07:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:07:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:07:36] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.57ms +[2025-07-05 10:07:36] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.57ms +[2025-07-05 10:07:37] [Rank 0] step:41/10000 train_time:3214ms step_avg:78.40ms +[2025-07-05 10:07:37] [Rank 0] step:41/10000 train_time:3214ms step_avg:78.40ms +[2025-07-05 10:07:39] [Rank 0] step:61/10000 train_time:4911ms step_avg:80.50ms +[2025-07-05 10:07:39] [Rank 0] step:61/10000 train_time:4911ms step_avg:80.50ms +[2025-07-05 10:07:41] [Rank 0] step:81/10000 train_time:6374ms step_avg:78.69ms +[2025-07-05 10:07:41] [Rank 0] step:81/10000 train_time:6374ms step_avg:78.69ms +[2025-07-05 10:07:43] [Rank 0] step:101/10000 train_time:8508ms step_avg:84.23ms +[2025-07-05 10:07:43] [Rank 0] step:101/10000 train_time:8508ms step_avg:84.23ms +[2025-07-05 10:07:44] [Rank 0] step:121/10000 train_time:9970ms step_avg:82.40ms +[2025-07-05 10:07:44] [Rank 0] step:121/10000 train_time:9970ms step_avg:82.40ms +[2025-07-05 10:07:46] [Rank 0] step:141/10000 train_time:11436ms step_avg:81.11ms +[2025-07-05 10:07:46] [Rank 0] step:141/10000 train_time:11436ms step_avg:81.11ms +[2025-07-05 10:07:47] [Rank 0] step:161/10000 train_time:12905ms step_avg:80.16ms +[2025-07-05 10:07:47] [Rank 0] step:161/10000 train_time:12905ms step_avg:80.16ms +[2025-07-05 10:07:49] [Rank 0] step:181/10000 train_time:15056ms step_avg:83.18ms +[2025-07-05 10:07:49] [Rank 0] step:181/10000 train_time:15056ms step_avg:83.18ms +[2025-07-05 10:07:51] [Rank 0] step:201/10000 train_time:16505ms step_avg:82.11ms +[2025-07-05 10:07:51] [Rank 0] step:201/10000 train_time:16505ms step_avg:82.11ms +[2025-07-05 10:07:52] [Rank 0] step:221/10000 train_time:17974ms step_avg:81.33ms +[2025-07-05 10:07:52] [Rank 0] step:221/10000 train_time:17974ms step_avg:81.33ms +[2025-07-05 10:07:54] [Rank 0] step:241/10000 train_time:19443ms step_avg:80.68ms +[2025-07-05 10:07:54] [Rank 0] step:241/10000 train_time:19443ms step_avg:80.68ms +[2025-07-05 10:07:55] [Rank 0] step:261/10000 train_time:20912ms step_avg:80.12ms +[2025-07-05 10:07:55] [Rank 0] step:261/10000 train_time:20912ms step_avg:80.12ms +[2025-07-05 10:07:57] [Rank 0] step:281/10000 train_time:23031ms step_avg:81.96ms +[2025-07-05 10:07:57] [Rank 0] step:281/10000 train_time:23031ms step_avg:81.96ms +[2025-07-05 10:07:59] [Rank 0] step:301/10000 train_time:24512ms step_avg:81.44ms +[2025-07-05 10:07:59] [Rank 0] step:301/10000 train_time:24512ms step_avg:81.44ms +[2025-07-05 10:08:00] [Rank 0] step:321/10000 train_time:25984ms step_avg:80.95ms +[2025-07-05 10:08:00] [Rank 0] step:321/10000 train_time:25984ms step_avg:80.95ms +[2025-07-05 10:08:02] [Rank 0] step:341/10000 train_time:27456ms step_avg:80.52ms +[2025-07-05 10:08:02] [Rank 0] step:341/10000 train_time:27456ms step_avg:80.52ms +[2025-07-05 10:08:03] [Rank 0] step:361/10000 train_time:29181ms step_avg:80.83ms +[2025-07-05 10:08:03] [Rank 0] step:361/10000 train_time:29181ms step_avg:80.83ms +[2025-07-05 10:08:05] [Rank 0] step:381/10000 train_time:30630ms step_avg:80.39ms +[2025-07-05 10:08:05] [Rank 0] step:381/10000 train_time:30630ms step_avg:80.39ms +[2025-07-05 10:08:06] [Rank 0] step:401/10000 train_time:32099ms step_avg:80.05ms +[2025-07-05 10:08:06] [Rank 0] step:401/10000 train_time:32099ms step_avg:80.05ms +[2025-07-05 10:08:08] [Rank 0] step:421/10000 train_time:33573ms step_avg:79.75ms 
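The step_avg field in these progress lines is the cumulative training wall-clock time divided by the number of steps completed so far, as computed in the timing code of the logged script. Checking it against the last value printed above (names are illustrative):

    train_time_ms = 33573                      # cumulative train_time reported at step 421
    steps_done = 421
    step_avg_ms = train_time_ms / steps_done   # -> 79.74..., printed as 79.75ms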
+[2025-07-05 10:08:08] [Rank 0] step:421/10000 train_time:33573ms step_avg:79.75ms +[2025-07-05 10:08:09] [Rank 0] step:441/10000 train_time:35044ms step_avg:79.46ms +[2025-07-05 10:08:09] [Rank 0] step:441/10000 train_time:35044ms step_avg:79.46ms +[2025-07-05 10:08:11] [Rank 0] step:461/10000 train_time:37191ms step_avg:80.68ms +[2025-07-05 10:08:11] [Rank 0] step:461/10000 train_time:37191ms step_avg:80.68ms +[2025-07-05 10:08:13] [Rank 0] step:481/10000 train_time:38662ms step_avg:80.38ms +[2025-07-05 10:08:13] [Rank 0] step:481/10000 train_time:38662ms step_avg:80.38ms +[2025-07-05 10:08:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:08:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:08:15] [Rank 0] PRINT: step:500/10000 train_loss:3.3889 val_loss:1.6169 train_time:40135ms step_avg:80.27ms +[2025-07-05 10:08:15] [Rank 0] PRINT: step:500/10000 train_loss:3.3889 val_loss:1.6169 train_time:40135ms step_avg:80.27ms +[2025-07-05 10:08:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:08:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fa5bbb34fe0ab4f6864d71ca310d73109ffa22ac --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "275cc9f2-318f-4210-b48f-0f7a8fb7149f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/training_log_275cc9f2-318f-4210-b48f-0f7a8fb7149f.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/training_log_275cc9f2-318f-4210-b48f-0f7a8fb7149f.txt new file mode 100644 index 0000000000000000000000000000000000000000..530b35879987b129e6f8fa04c7ed0e6164f48f9c --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/training_log_275cc9f2-318f-4210-b48f-0f7a8fb7149f.txt @@ -0,0 +1,2662 @@ +[2025-07-05 07:49:40] [Rank 0] PRINT: --- Script Start: Sat Jul 5 07:49:40 2025 --- +[2025-07-05 07:49:40] [Rank 0] PRINT: --- Script Start: Sat Jul 5 07:49:40 2025 --- +[2025-07-05 07:49:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 07:49:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 07:49:40] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 07:49:40] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 07:49:40] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 07:49:40] 
[Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 07:49:40] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42 +[2025-07-05 07:49:40] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42 +[2025-07-05 07:49:40] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
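# A minimal sketch (not part of the logged run) of the stable-then-decay schedule implemented
# by get_lr above: the LR multiplier stays at 1.0 for the first (1 - cooldown_frac) of training
# and then decays linearly toward 0.1. With the logged num_iterations = 10000 and
# cooldown_frac = 0.8:
#
#   def lr_multiplier(step, total=10000, cooldown=0.8):
#       x = min(max(step / total, 0.0), 1.0)
#       if x < 1 - cooldown:
#           return 1.0
#       w = (1 - x) / cooldown
#       return w * 1.0 + (1 - w) * 0.1
#
#   lr_multiplier(2000)   # -> 1.0    (end of the constant phase)
#   lr_multiplier(5000)   # -> 0.6625 (w = 0.625)
#   lr_multiplier(10000)  # -> 0.1    (floor of the linear decay)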
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+            break
+
+        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+        loss_train.backward()
+        train_loss_sum += loss_train.detach()/ args.train_seq_len
+        train_step_count += 1
+
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+        current_lr_val = get_lr(step)
+        for opt in optimizers:
+            for group in opt.param_groups:
+                group["lr"] = group["initial_lr"] * current_lr_val
+
+        if optimizer2 is not None:
+            for group in optimizer2.param_groups:
+                frac = min(step / 300, 1)
+                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+        for opt in optimizers:
+            opt.step()
+
+        model_compiled.zero_grad(set_to_none=True)
+
+        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+            total_tokens_in_batch = args.train_seq_len * world_size
+            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 07:49:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 07:49:41] [Rank 0] PRINT: Constructing model...
+[2025-07-05 07:49:43] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 07:49:43] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 07:49:43] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 07:49:44] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 07:49:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 07:49:44] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 07:49:44] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 07:49:44] [Rank 0] PRINT: Model returns: 
+[2025-07-05 07:49:44] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 07:49:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 07:49:44] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 07:49:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 07:49:44] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 07:49:45] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 07:49:45] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 07:49:45] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:01:21] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:01:21] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:01:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:05:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:05:28] [Rank 0] step:21/10000 train_time:2269ms step_avg:108.05ms
+[2025-07-05 08:05:30] [Rank 0] step:41/10000 train_time:3734ms step_avg:91.07ms
+[2025-07-05 08:05:31] [Rank 0] step:61/10000 train_time:5203ms step_avg:85.29ms
+[2025-07-05 08:05:33] [Rank 0] step:81/10000 train_time:6946ms step_avg:85.75ms
+[2025-07-05 08:05:35] [Rank 0] step:101/10000 train_time:8811ms step_avg:87.24ms
+[2025-07-05 08:05:36] [Rank 0] step:121/10000 train_time:10282ms step_avg:84.98ms
+[2025-07-05 08:05:38] [Rank 0] step:141/10000 train_time:11755ms step_avg:83.37ms
+[2025-07-05 08:05:39] [Rank 0] step:161/10000 train_time:13225ms step_avg:82.14ms
+[2025-07-05 08:05:41] [Rank 0] step:181/10000 train_time:14696ms step_avg:81.19ms
+[2025-07-05 08:05:42] [Rank 0] step:201/10000 train_time:16201ms step_avg:80.60ms
+[2025-07-05 08:05:44] [Rank 0] step:221/10000 train_time:17675ms step_avg:79.98ms
+[2025-07-05 08:05:45] [Rank 0] step:241/10000 train_time:19146ms step_avg:79.44ms
+[2025-07-05 08:05:47] [Rank 0] step:261/10000 train_time:20621ms step_avg:79.01ms
+[2025-07-05 08:05:48] [Rank 0] step:281/10000 train_time:22331ms step_avg:79.47ms
+[2025-07-05 08:05:50] [Rank 0] step:301/10000 train_time:23805ms step_avg:79.09ms
+[2025-07-05 08:05:51] [Rank 0] step:321/10000 train_time:25278ms step_avg:78.75ms
+[2025-07-05 08:05:53] [Rank 0] step:341/10000 train_time:26750ms step_avg:78.45ms
+[2025-07-05 08:05:54] [Rank 0] step:361/10000 train_time:28277ms step_avg:78.33ms
+[2025-07-05 08:05:56] [Rank 0] step:381/10000 train_time:29731ms step_avg:78.03ms
+[2025-07-05 08:05:57] [Rank 0] step:401/10000 train_time:31205ms step_avg:77.82ms
+[2025-07-05 08:05:59] [Rank 0] step:421/10000 train_time:32678ms step_avg:77.62ms
+[2025-07-05 08:05:59] [Rank 0] step:421/10000 train_time:32678ms step_avg:77.62ms +[2025-07-05 08:06:00] [Rank 0] step:441/10000 train_time:34151ms step_avg:77.44ms +[2025-07-05 08:06:00] [Rank 0] step:441/10000 train_time:34151ms step_avg:77.44ms +[2025-07-05 08:06:02] [Rank 0] step:461/10000 train_time:35859ms step_avg:77.79ms +[2025-07-05 08:06:02] [Rank 0] step:461/10000 train_time:35859ms step_avg:77.79ms +[2025-07-05 08:06:03] [Rank 0] step:481/10000 train_time:37332ms step_avg:77.61ms +[2025-07-05 08:06:03] [Rank 0] step:481/10000 train_time:37332ms step_avg:77.61ms +[2025-07-05 08:06:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:06:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:06:06] [Rank 0] PRINT: step:500/10000 train_loss:2.2952 val_loss:1.3667 train_time:38804ms step_avg:77.61ms +[2025-07-05 08:06:06] [Rank 0] PRINT: step:500/10000 train_loss:2.2952 val_loss:1.3667 train_time:38804ms step_avg:77.61ms +[2025-07-05 08:06:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:06:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6fcc743ebec949841fb206a2aa2b2c429529494e --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "f400bcfa-f67a-49f0-ab92-746b9eb90be0", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/training_log_f400bcfa-f67a-49f0-ab92-746b9eb90be0.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/training_log_f400bcfa-f67a-49f0-ab92-746b9eb90be0.txt new file mode 100644 index 0000000000000000000000000000000000000000..4a795aef8f7751e064cf816c19c771c2d362101e --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/training_log_f400bcfa-f67a-49f0-ab92-746b9eb90be0.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:33:09] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:33:09 2025 --- +[2025-07-05 08:33:09] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:33:09 2025 --- +[2025-07-05 08:33:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:33:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:33:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:33:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:33:09] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:33:09] 
[Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:33:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43 +[2025-07-05 08:33:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43 +[2025-07-05 08:33:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write the message to the run's log file (one append per call) + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:33:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:33:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:33:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:33:10] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:33:10] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:33:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:33:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:33:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:33:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:33:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:33:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:33:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:33:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:33:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:33:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:33:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:33:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:33:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:33:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:33:12] [Rank 0] PRINT: Model returns: +[2025-07-05 08:33:12] [Rank 0] PRINT: Model returns: +[2025-07-05 08:33:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:33:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:33:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:33:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 08:33:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:33:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 08:33:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:33:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 08:33:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:33:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 08:33:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:33:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:33:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:33:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:33:13] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:33:13] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:34:18] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:34:18] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:34:19] [Rank 0] PRINT: Starting training... +[2025-07-05 08:34:19] [Rank 0] PRINT: Starting training... +[2025-07-05 08:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
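The divisibility warning above is plain integer arithmetic: val_seq_len = 4*16*1024 = 65,536 tokens per rank, and since the script computes val_batch_size = world_size * val_seq_len, the logged 262,144 implies a world size of 4; validation therefore runs 1,966,080 // 262,144 = 7 full steps and skips the trailing 131,072 of the 1,966,080 val_tokens each time. A small sketch of that arithmetic (values taken from the config and the warning above; variable names are illustrative):

val_seq_len = 4 * 16 * 1024                        # 65,536 tokens per rank (from the config)
val_batch_size = 262_144                           # from the warning; 262_144 // val_seq_len = 4 ranks
val_steps = 1_966_080 // val_batch_size            # 7 full validation steps per evaluation
skipped = 1_966_080 - val_steps * val_batch_size   # 131,072 tokens not evaluated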
+[2025-07-05 08:34:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:34:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:34:28] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.48ms +[2025-07-05 08:34:28] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.48ms +[2025-07-05 08:34:29] [Rank 0] step:41/10000 train_time:3215ms step_avg:78.41ms +[2025-07-05 08:34:29] [Rank 0] step:41/10000 train_time:3215ms step_avg:78.41ms +[2025-07-05 08:34:30] [Rank 0] step:61/10000 train_time:4682ms step_avg:76.75ms +[2025-07-05 08:34:30] [Rank 0] step:61/10000 train_time:4682ms step_avg:76.75ms +[2025-07-05 08:34:32] [Rank 0] step:81/10000 train_time:6150ms step_avg:75.93ms +[2025-07-05 08:34:32] [Rank 0] step:81/10000 train_time:6150ms step_avg:75.93ms +[2025-07-05 08:34:34] [Rank 0] step:101/10000 train_time:8279ms step_avg:81.97ms +[2025-07-05 08:34:34] [Rank 0] step:101/10000 train_time:8279ms step_avg:81.97ms +[2025-07-05 08:34:36] [Rank 0] step:121/10000 train_time:9746ms step_avg:80.54ms +[2025-07-05 08:34:36] [Rank 0] step:121/10000 train_time:9746ms step_avg:80.54ms +[2025-07-05 08:34:37] [Rank 0] step:141/10000 train_time:11214ms step_avg:79.53ms +[2025-07-05 08:34:37] [Rank 0] step:141/10000 train_time:11214ms step_avg:79.53ms +[2025-07-05 08:34:38] [Rank 0] step:161/10000 train_time:12685ms step_avg:78.79ms +[2025-07-05 08:34:38] [Rank 0] step:161/10000 train_time:12685ms step_avg:78.79ms +[2025-07-05 08:34:40] [Rank 0] step:181/10000 train_time:14200ms step_avg:78.45ms +[2025-07-05 08:34:40] [Rank 0] step:181/10000 train_time:14200ms step_avg:78.45ms +[2025-07-05 08:34:42] [Rank 0] step:201/10000 train_time:15854ms step_avg:78.87ms +[2025-07-05 08:34:42] [Rank 0] step:201/10000 train_time:15854ms step_avg:78.87ms +[2025-07-05 08:34:43] [Rank 0] step:221/10000 train_time:17325ms step_avg:78.39ms +[2025-07-05 08:34:43] [Rank 0] step:221/10000 train_time:17325ms step_avg:78.39ms +[2025-07-05 08:34:45] [Rank 0] step:241/10000 train_time:18795ms step_avg:77.99ms +[2025-07-05 08:34:45] [Rank 0] step:241/10000 train_time:18795ms step_avg:77.99ms +[2025-07-05 08:34:46] [Rank 0] step:261/10000 train_time:20263ms step_avg:77.64ms +[2025-07-05 08:34:46] [Rank 0] step:261/10000 train_time:20263ms step_avg:77.64ms +[2025-07-05 08:34:48] [Rank 0] step:281/10000 train_time:21969ms step_avg:78.18ms +[2025-07-05 08:34:48] [Rank 0] step:281/10000 train_time:21969ms step_avg:78.18ms +[2025-07-05 08:34:49] [Rank 0] step:301/10000 train_time:23437ms step_avg:77.86ms +[2025-07-05 08:34:49] [Rank 0] step:301/10000 train_time:23437ms step_avg:77.86ms +[2025-07-05 08:34:51] [Rank 0] step:321/10000 train_time:24908ms step_avg:77.59ms +[2025-07-05 08:34:51] [Rank 0] step:321/10000 train_time:24908ms step_avg:77.59ms +[2025-07-05 08:34:52] [Rank 0] step:341/10000 train_time:26381ms step_avg:77.36ms +[2025-07-05 08:34:52] [Rank 0] step:341/10000 train_time:26381ms step_avg:77.36ms +[2025-07-05 08:34:54] [Rank 0] step:361/10000 train_time:28105ms step_avg:77.85ms +[2025-07-05 08:34:54] [Rank 0] step:361/10000 train_time:28105ms step_avg:77.85ms +[2025-07-05 08:34:56] [Rank 0] step:381/10000 train_time:29974ms step_avg:78.67ms +[2025-07-05 08:34:56] [Rank 0] step:381/10000 train_time:29974ms step_avg:78.67ms +[2025-07-05 08:34:57] [Rank 0] step:401/10000 train_time:31443ms step_avg:78.41ms +[2025-07-05 08:34:57] [Rank 0] step:401/10000 train_time:31443ms step_avg:78.41ms +[2025-07-05 08:34:59] [Rank 0] step:421/10000 train_time:32914ms step_avg:78.18ms 
+[2025-07-05 08:34:59] [Rank 0] step:421/10000 train_time:32914ms step_avg:78.18ms +[2025-07-05 08:35:00] [Rank 0] step:441/10000 train_time:34381ms step_avg:77.96ms +[2025-07-05 08:35:00] [Rank 0] step:441/10000 train_time:34381ms step_avg:77.96ms +[2025-07-05 08:35:02] [Rank 0] step:461/10000 train_time:36089ms step_avg:78.28ms +[2025-07-05 08:35:02] [Rank 0] step:461/10000 train_time:36089ms step_avg:78.28ms +[2025-07-05 08:35:03] [Rank 0] step:481/10000 train_time:37556ms step_avg:78.08ms +[2025-07-05 08:35:03] [Rank 0] step:481/10000 train_time:37556ms step_avg:78.08ms +[2025-07-05 08:35:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:35:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:35:06] [Rank 0] PRINT: step:500/10000 train_loss:2.2940 val_loss:1.3653 train_time:39208ms step_avg:78.42ms +[2025-07-05 08:35:06] [Rank 0] PRINT: step:500/10000 train_loss:2.2940 val_loss:1.3653 train_time:39208ms step_avg:78.42ms +[2025-07-05 08:35:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:35:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..d268c1f0c1aaa5eae414b52159b49ed85e1bd28e --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "902a0c2b-dc7a-40a8-931a-7b867d65f370", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44/training_log_902a0c2b-dc7a-40a8-931a-7b867d65f370.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44/training_log_902a0c2b-dc7a-40a8-931a-7b867d65f370.txt new file mode 100644 index 0000000000000000000000000000000000000000..68c9934873523d38ef59603c43bf6409086a0a75 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44/training_log_902a0c2b-dc7a-40a8-931a-7b867d65f370.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:00:13] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:00:13 2025 --- +[2025-07-05 09:00:13] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:00:13 2025 --- +[2025-07-05 09:00:13] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:00:13] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:00:13] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:00:13] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:00:13] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:00:13] 
[Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:00:13] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44 +[2025-07-05 09:00:13] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_44 +[2025-07-05 09:00:13] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:00:13] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file exactly once (guarded so runs without a logfile don't fail)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
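+# Illustrative recap of the get_lr schedule defined above for this run's settings
+# (num_iterations=10000, cooldown_frac=0.8): the multiplier stays at 1.0 for the first
+# 2000 steps (20% of training), then decays linearly to 0.1 at the final step, e.g.
+#   get_lr(0) == 1.0, get_lr(2000) == 1.0, get_lr(6000) == 0.55, get_lr(10000) == 0.1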
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:00:14] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:00:14] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:00:16] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:00:16] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:00:16] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:00:16] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 09:00:16] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:00:16] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:00:16] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:00:16] [Rank 0] PRINT: Model returns:
+[2025-07-05 09:00:16] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:00:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:00:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:00:16] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:00:16] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:00:16] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:00:16] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:00:16] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:01:21] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:01:21] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:01:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:01:28] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:01:30] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.41ms
+[2025-07-05 09:01:31] [Rank 0] step:41/10000 train_time:3213ms step_avg:78.36ms
+[2025-07-05 09:01:33] [Rank 0] step:61/10000 train_time:4675ms step_avg:76.65ms
+[2025-07-05 09:01:34] [Rank 0] step:81/10000 train_time:6140ms step_avg:75.80ms
+[2025-07-05 09:01:36] [Rank 0] step:101/10000 train_time:8263ms step_avg:81.81ms
+[2025-07-05 09:01:38] [Rank 0] step:121/10000 train_time:9729ms step_avg:80.41ms
+[2025-07-05 09:01:39] [Rank 0] step:141/10000 train_time:11193ms step_avg:79.38ms
+[2025-07-05 09:01:41] [Rank 0] step:161/10000 train_time:12659ms step_avg:78.63ms
+[2025-07-05 09:01:43] [Rank 0] step:181/10000 train_time:14392ms step_avg:79.52ms
+[2025-07-05 09:01:44] [Rank 0] step:201/10000 train_time:16260ms step_avg:80.89ms
+[2025-07-05 09:01:46] [Rank 0] step:221/10000 train_time:17725ms step_avg:80.20ms
+[2025-07-05 09:01:47] [Rank 0] step:241/10000 train_time:19192ms step_avg:79.64ms
+[2025-07-05 09:01:49] [Rank 0] step:261/10000 train_time:20660ms step_avg:79.16ms
+[2025-07-05 09:01:51] [Rank 0] step:281/10000 train_time:22367ms step_avg:79.60ms
+[2025-07-05 09:01:52] [Rank 0] step:301/10000 train_time:23837ms step_avg:79.19ms
+[2025-07-05 09:01:54] [Rank 0] step:321/10000 train_time:25305ms step_avg:78.83ms
+[2025-07-05 09:01:55] [Rank 0] step:341/10000 train_time:26775ms step_avg:78.52ms
+[2025-07-05 09:01:57] [Rank 0] step:361/10000 train_time:28942ms step_avg:80.17ms
+[2025-07-05 09:01:59] [Rank 0] step:381/10000 train_time:30373ms step_avg:79.72ms
+[2025-07-05 09:02:00] [Rank 0] step:401/10000 train_time:31840ms step_avg:79.40ms
+[2025-07-05 09:02:02] [Rank 0] step:421/10000 train_time:33307ms step_avg:79.11ms +[2025-07-05 09:02:03] [Rank 0] step:441/10000 train_time:34776ms step_avg:78.86ms +[2025-07-05 09:02:03] [Rank 0] step:441/10000 train_time:34776ms step_avg:78.86ms +[2025-07-05 09:02:05] [Rank 0] step:461/10000 train_time:36898ms step_avg:80.04ms +[2025-07-05 09:02:05] [Rank 0] step:461/10000 train_time:36898ms step_avg:80.04ms +[2025-07-05 09:02:07] [Rank 0] step:481/10000 train_time:38362ms step_avg:79.76ms +[2025-07-05 09:02:07] [Rank 0] step:481/10000 train_time:38362ms step_avg:79.76ms +[2025-07-05 09:02:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:02:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:02:09] [Rank 0] PRINT: step:500/10000 train_loss:2.2947 val_loss:1.3669 train_time:39829ms step_avg:79.66ms +[2025-07-05 09:02:09] [Rank 0] PRINT: step:500/10000 train_loss:2.2947 val_loss:1.3669 train_time:39829ms step_avg:79.66ms +[2025-07-05 09:02:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:02:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..af4d551d1f43159a58028d56c0b7405ec1c13a0e --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "af19707c-b6a5-4566-a059-0b4ba441d009", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/training_log_af19707c-b6a5-4566-a059-0b4ba441d009.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/training_log_af19707c-b6a5-4566-a059-0b4ba441d009.txt new file mode 100644 index 0000000000000000000000000000000000000000..c55afa90fe7bd1306b38c46c1b4d47da6597864a --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/training_log_af19707c-b6a5-4566-a059-0b4ba441d009.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:27:51] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:27:51 2025 --- +[2025-07-05 09:27:51] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:27:51 2025 --- +[2025-07-05 09:27:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:27:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:27:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:27:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:27:51] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:27:51] 
[Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:27:51] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45 +[2025-07-05 09:27:51] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45 +[2025-07-05 09:27:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:27:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
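+# --- Editor's sketch (not part of the original logged script): the run
+# directory name is built purely from the CLI arguments, which is why this
+# log lives under mode_0_param_qkvo_lr_0.005_seed_45/ and the earlier run
+# under mode_0_param_qkvo_lr_0.0001_seed_42/. The helper _sketch_run_folder
+# is a hypothetical illustration of that naming scheme only.
+def _sketch_run_folder(optimizer_mode: int, parameterization: str, adam_lr: float, seed: int) -> str:
+    return f"mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed}"
+
+assert _sketch_run_folder(0, "qkvo", 0.005, 45) == "mode_0_param_qkvo_lr_0.005_seed_45"
+assert _sketch_run_folder(0, "qkvo", 0.0001, 42) == "mode_0_param_qkvo_lr_0.0001_seed_42"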
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
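+# For reference: with num_iterations=10000 and cooldown_frac=0.8, the get_lr()
+# multiplier defined above stays at 1.0 for the first 2000 steps and then decays
+# linearly toward 0.1 (e.g. get_lr(6000) == 0.55, get_lr(10000) == 0.1).
+# train_loss_sum (above) and train_step_count (below) are per-rank running
+# accumulators for the training loss between validation points; they are averaged
+# across ranks with dist.all_reduce at each validation and then reset to zero.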
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:27:52] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:27:52] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:27:54] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:27:54] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:27:54] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:27:54] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 09:27:54] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:27:54] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:27:54] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:27:55] [Rank 0] PRINT: Model returns: 
+[2025-07-05 09:27:55] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:27:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:27:55] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:27:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:27:55] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:27:55] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:27:55] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:27:55] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:29:00] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:29:00] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:29:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:29:07] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:29:09] [Rank 0] step:21/10000 train_time:1759ms step_avg:83.76ms
+[2025-07-05 09:29:10] [Rank 0] step:41/10000 train_time:3223ms step_avg:78.62ms
+[2025-07-05 09:29:12] [Rank 0] step:61/10000 train_time:4689ms step_avg:76.86ms
+[2025-07-05 09:29:13] [Rank 0] step:81/10000 train_time:6154ms step_avg:75.97ms
+[2025-07-05 09:29:15] [Rank 0] step:101/10000 train_time:7860ms step_avg:77.82ms
+[2025-07-05 09:29:16] [Rank 0] step:121/10000 train_time:9326ms step_avg:77.07ms
+[2025-07-05 09:29:18] [Rank 0] step:141/10000 train_time:10792ms step_avg:76.54ms
+[2025-07-05 09:29:19] [Rank 0] step:161/10000 train_time:12259ms step_avg:76.14ms
+[2025-07-05 09:29:21] [Rank 0] step:181/10000 train_time:14410ms step_avg:79.61ms
+[2025-07-05 09:29:23] [Rank 0] step:201/10000 train_time:15857ms step_avg:78.89ms
+[2025-07-05 09:29:24] [Rank 0] step:221/10000 train_time:17324ms step_avg:78.39ms
+[2025-07-05 09:29:26] [Rank 0] step:241/10000 train_time:18793ms step_avg:77.98ms
+[2025-07-05 09:29:27] [Rank 0] step:261/10000 train_time:20264ms step_avg:77.64ms
+[2025-07-05 09:29:29] [Rank 0] step:281/10000 train_time:21969ms step_avg:78.18ms
+[2025-07-05 09:29:30] [Rank 0] step:301/10000 train_time:23440ms step_avg:77.87ms
+[2025-07-05 09:29:32] [Rank 0] step:321/10000 train_time:24910ms step_avg:77.60ms
+[2025-07-05 09:29:33] [Rank 0] step:341/10000 train_time:26380ms step_avg:77.36ms
+[2025-07-05 09:29:36] [Rank 0] step:361/10000 train_time:27900ms step_avg:77.29ms
+[2025-07-05 09:29:37] [Rank 0] step:381/10000 train_time:29973ms step_avg:78.67ms
+[2025-07-05 09:29:38] [Rank 0] step:401/10000 train_time:31441ms step_avg:78.41ms
+[2025-07-05 09:29:40] [Rank 0] step:421/10000 train_time:32909ms step_avg:78.17ms +[2025-07-05 09:29:41] [Rank 0] step:441/10000 train_time:34377ms step_avg:77.95ms +[2025-07-05 09:29:41] [Rank 0] step:441/10000 train_time:34377ms step_avg:77.95ms +[2025-07-05 09:29:43] [Rank 0] step:461/10000 train_time:36079ms step_avg:78.26ms +[2025-07-05 09:29:43] [Rank 0] step:461/10000 train_time:36079ms step_avg:78.26ms +[2025-07-05 09:29:45] [Rank 0] step:481/10000 train_time:37546ms step_avg:78.06ms +[2025-07-05 09:29:45] [Rank 0] step:481/10000 train_time:37546ms step_avg:78.06ms +[2025-07-05 09:29:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:29:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:29:47] [Rank 0] PRINT: step:500/10000 train_loss:2.2945 val_loss:1.3663 train_time:39012ms step_avg:78.02ms +[2025-07-05 09:29:47] [Rank 0] PRINT: step:500/10000 train_loss:2.2945 val_loss:1.3663 train_time:39012ms step_avg:78.02ms +[2025-07-05 09:29:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:29:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..741d6fd7f2683ce2f7f6abe1629da94c2de5edc5 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "8db3169f-c04f-4d4f-9857-74b25a3bc8a7", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/training_log_8db3169f-c04f-4d4f-9857-74b25a3bc8a7.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/training_log_8db3169f-c04f-4d4f-9857-74b25a3bc8a7.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d0cf26e1862642ded602db911e137b6f60bfc97 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/training_log_8db3169f-c04f-4d4f-9857-74b25a3bc8a7.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:54:56] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:54:56 2025 --- +[2025-07-05 09:54:56] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:54:56 2025 --- +[2025-07-05 09:54:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:54:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:54:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:54:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:54:56] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 09:54:56] 
[Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 09:54:56] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46 +[2025-07-05 09:54:56] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46 +[2025-07-05 09:54:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the run's logfile once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:54:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
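+# NOTE: train_loss_sum and train_step_count accumulate this rank's training loss between
+# validation points; their ratio (the average training loss) is all-reduced across ranks
+# and both counters are reset to zero inside the validation block below.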
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:54:56] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:54:56] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:54:56] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:54:56] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:54:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:54:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:54:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:54:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:54:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:54:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:55:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:55:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:55:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:55:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:55:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:55:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:55:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:55:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:55:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:55:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:55:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:55:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:55:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:55:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:55:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:55:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:55:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:55:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:55:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:55:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:55:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:55:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:55:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:55:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:55:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:55:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:56:05] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:56:05] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:56:05] [Rank 0] PRINT: Starting training... +[2025-07-05 09:56:05] [Rank 0] PRINT: Starting training... +[2025-07-05 09:56:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:56:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:56:13] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:56:13] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:56:15] [Rank 0] step:21/10000 train_time:1907ms step_avg:90.79ms +[2025-07-05 09:56:15] [Rank 0] step:21/10000 train_time:1907ms step_avg:90.79ms +[2025-07-05 09:56:16] [Rank 0] step:41/10000 train_time:3435ms step_avg:83.78ms +[2025-07-05 09:56:16] [Rank 0] step:41/10000 train_time:3435ms step_avg:83.78ms +[2025-07-05 09:56:18] [Rank 0] step:61/10000 train_time:4954ms step_avg:81.22ms +[2025-07-05 09:56:18] [Rank 0] step:61/10000 train_time:4954ms step_avg:81.22ms +[2025-07-05 09:56:19] [Rank 0] step:81/10000 train_time:6419ms step_avg:79.25ms +[2025-07-05 09:56:19] [Rank 0] step:81/10000 train_time:6419ms step_avg:79.25ms +[2025-07-05 09:56:21] [Rank 0] step:101/10000 train_time:8128ms step_avg:80.47ms +[2025-07-05 09:56:21] [Rank 0] step:101/10000 train_time:8128ms step_avg:80.47ms +[2025-07-05 09:56:22] [Rank 0] step:121/10000 train_time:9593ms step_avg:79.28ms +[2025-07-05 09:56:22] [Rank 0] step:121/10000 train_time:9593ms step_avg:79.28ms +[2025-07-05 09:56:24] [Rank 0] step:141/10000 train_time:11059ms step_avg:78.44ms +[2025-07-05 09:56:24] [Rank 0] step:141/10000 train_time:11059ms step_avg:78.44ms +[2025-07-05 09:56:25] [Rank 0] step:161/10000 train_time:12529ms step_avg:77.82ms +[2025-07-05 09:56:25] [Rank 0] step:161/10000 train_time:12529ms step_avg:77.82ms +[2025-07-05 09:56:27] [Rank 0] step:181/10000 train_time:13996ms step_avg:77.33ms +[2025-07-05 09:56:27] [Rank 0] step:181/10000 train_time:13996ms step_avg:77.33ms +[2025-07-05 09:56:28] [Rank 0] step:201/10000 train_time:15697ms step_avg:78.09ms +[2025-07-05 09:56:28] [Rank 0] step:201/10000 train_time:15697ms step_avg:78.09ms +[2025-07-05 09:56:30] [Rank 0] step:221/10000 train_time:17166ms step_avg:77.68ms +[2025-07-05 09:56:30] [Rank 0] step:221/10000 train_time:17166ms step_avg:77.68ms +[2025-07-05 09:56:31] [Rank 0] step:241/10000 train_time:18637ms step_avg:77.33ms +[2025-07-05 09:56:31] [Rank 0] step:241/10000 train_time:18637ms step_avg:77.33ms +[2025-07-05 09:56:33] [Rank 0] step:261/10000 train_time:20109ms step_avg:77.05ms +[2025-07-05 09:56:33] [Rank 0] step:261/10000 train_time:20109ms step_avg:77.05ms +[2025-07-05 09:56:35] [Rank 0] step:281/10000 train_time:21813ms step_avg:77.63ms +[2025-07-05 09:56:35] [Rank 0] step:281/10000 train_time:21813ms step_avg:77.63ms +[2025-07-05 09:56:36] [Rank 0] step:301/10000 train_time:23285ms step_avg:77.36ms +[2025-07-05 09:56:36] [Rank 0] step:301/10000 train_time:23285ms step_avg:77.36ms +[2025-07-05 09:56:37] [Rank 0] step:321/10000 train_time:24756ms step_avg:77.12ms +[2025-07-05 09:56:37] [Rank 0] step:321/10000 train_time:24756ms step_avg:77.12ms +[2025-07-05 09:56:39] [Rank 0] step:341/10000 train_time:26227ms step_avg:76.91ms +[2025-07-05 09:56:39] [Rank 0] step:341/10000 train_time:26227ms step_avg:76.91ms +[2025-07-05 09:56:40] [Rank 0] step:361/10000 train_time:27748ms step_avg:76.87ms +[2025-07-05 09:56:40] [Rank 0] step:361/10000 train_time:27748ms step_avg:76.87ms +[2025-07-05 09:56:42] [Rank 0] step:381/10000 train_time:29201ms step_avg:76.64ms +[2025-07-05 09:56:42] [Rank 0] step:381/10000 train_time:29201ms step_avg:76.64ms +[2025-07-05 09:56:43] [Rank 0] step:401/10000 train_time:30671ms step_avg:76.49ms +[2025-07-05 09:56:43] [Rank 0] step:401/10000 train_time:30671ms step_avg:76.49ms +[2025-07-05 09:56:45] [Rank 0] step:421/10000 train_time:32141ms step_avg:76.34ms 
+[2025-07-05 09:56:45] [Rank 0] step:421/10000 train_time:32141ms step_avg:76.34ms +[2025-07-05 09:56:46] [Rank 0] step:441/10000 train_time:33608ms step_avg:76.21ms +[2025-07-05 09:56:46] [Rank 0] step:441/10000 train_time:33608ms step_avg:76.21ms +[2025-07-05 09:56:48] [Rank 0] step:461/10000 train_time:35722ms step_avg:77.49ms +[2025-07-05 09:56:48] [Rank 0] step:461/10000 train_time:35722ms step_avg:77.49ms +[2025-07-05 09:56:50] [Rank 0] step:481/10000 train_time:37191ms step_avg:77.32ms +[2025-07-05 09:56:50] [Rank 0] step:481/10000 train_time:37191ms step_avg:77.32ms +[2025-07-05 09:56:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:56:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:56:52] [Rank 0] PRINT: step:500/10000 train_loss:2.2935 val_loss:1.3656 train_time:38656ms step_avg:77.31ms +[2025-07-05 09:56:52] [Rank 0] PRINT: step:500/10000 train_loss:2.2935 val_loss:1.3656 train_time:38656ms step_avg:77.31ms +[2025-07-05 09:56:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:56:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..daca6d930e87ae50d46894ed85680a410dae9694 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "d39945ff-8456-428d-9b1d-7038ceeb8fc0", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47/training_log_d39945ff-8456-428d-9b1d-7038ceeb8fc0.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47/training_log_d39945ff-8456-428d-9b1d-7038ceeb8fc0.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4e3e2079f15c8425843ef12d8f4d29eb8f6bac6 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47/training_log_d39945ff-8456-428d-9b1d-7038ceeb8fc0.txt @@ -0,0 +1,2662 @@ +[2025-07-05 07:57:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 07:57:57 2025 --- +[2025-07-05 07:57:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 07:57:57 2025 --- +[2025-07-05 07:57:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 07:57:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 07:57:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 07:57:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 07:57:57] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 07:57:57] 
[Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 07:57:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47 +[2025-07-05 07:57:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_47 +[2025-07-05 07:57:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
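+
+# --- Editor's note: the following is an illustrative sketch added for readability, not
+# part of the logged run. It re-implements, in isolation, the stable-then-decay learning
+# rate multiplier (get_lr) and the growing attention-window schedule
+# (get_window_size_blocks) defined above, with num_iterations=10000 and cooldown_frac=0.8
+# taken from this script's Hyperparameters. The "_sketch_*" names are hypothetical and
+# local to this illustration.
+import math  # already imported at the top of the script; repeated so the sketch is self-contained
+
+def _sketch_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)   # training progress, clamped to [0, 1]
+    if x < 1 - cooldown_frac:
+        return 1.0                                   # constant phase (first 20% of steps here)
+    w = (1 - x) / max(cooldown_frac, 1e-9)           # interpolation weight during cooldown
+    return w * 1.0 + (1 - w) * 0.1                   # decays linearly from 1.0 down to 0.1
+
+def _sketch_window_tokens(step: int, num_iterations: int = 10000) -> int:
+    x = min(max(step / num_iterations, 0.0), 1.0)
+    # smallest multiple of 128 tokens that is >= 1728 * x, floored at 128 tokens
+    return max(128, 128 * math.ceil(1728 * x / 128))
+
+# Example: step 0 gives multiplier 1.0 and a 128-token window; the final step gives
+# multiplier 0.1 and a 1792-token window.
+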
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 07:57:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
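+
+# --- Editor's note: the following is an illustrative sketch added for readability, not
+# part of the logged run. It shows how the run directory name used just above is composed
+# from the parsed CLI arguments; the helper name is hypothetical and local to this
+# illustration.
+def _sketch_run_folder_name(optimizer_mode: int, model_parameterization: str,
+                            adam_lr: float, seed: int) -> str:
+    # Mirrors the f-string used for run_dir_path_str / run_folder_name in this script.
+    return f"mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed}"
+
+# For example, optimizer_mode=0, model_parameterization="qkvo", adam_lr=0.0001 and seed=42
+# give "mode_0_param_qkvo_lr_0.0001_seed_42", the folder naming used for these logs.
+assert _sketch_run_folder_name(0, "qkvo", 0.0001, 42) == "mode_0_param_qkvo_lr_0.0001_seed_42"
+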
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
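+# A minimal standalone sketch of the stable-then-decay rule implemented by get_lr above,
+# assuming num_iterations=10000 and cooldown_frac=0.8 as in this config (illustration only;
+# the training loop below uses get_lr itself, not this helper):
+def _illustrative_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamp training progress to [0, 1]
+    if x < 1 - cooldown_frac:                      # first (1 - cooldown_frac) of steps: full LR
+        return 1.0
+    w = (1 - x) / max(cooldown_frac, 1e-9)         # linear ramp across the cooldown window
+    return w * 1.0 + (1 - w) * 0.1                 # decays from 1.0 down to 0.1 of the base LR
+# e.g. _illustrative_lr_multiplier(0) == 1.0, _illustrative_lr_multiplier(10000) == 0.1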
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 07:57:58] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 07:57:58] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 07:57:58] [Rank 0] PRINT: Constructing model... +[2025-07-05 07:57:58] [Rank 0] PRINT: Constructing model... +[2025-07-05 07:58:00] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 07:58:00] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 07:58:00] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 07:58:00] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 07:58:00] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 07:58:00] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 07:58:01] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 07:58:01] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 07:58:01] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 07:58:01] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 07:58:01] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 07:58:01] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 07:58:01] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 07:58:01] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 07:58:01] [Rank 0] PRINT: Model returns: +[2025-07-05 07:58:01] [Rank 0] PRINT: Model returns: +[2025-07-05 07:58:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 07:58:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 07:58:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 07:58:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 07:58:01] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 07:58:01] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 07:58:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 07:58:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 07:58:01] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 07:58:01] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 07:58:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 07:58:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 07:58:01] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 07:58:01] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 07:58:01] [Rank 0] PRINT: Starting warmup... +[2025-07-05 07:58:01] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:10:41] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:10:41] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:10:41] [Rank 0] PRINT: Starting training... +[2025-07-05 08:10:41] [Rank 0] PRINT: Starting training... +[2025-07-05 08:10:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:10:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
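+# (Worked arithmetic for the warning above, an illustration assuming world_size=4 as implied
+#  by val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144:
+#      val_num_steps = 1966080 // 262144 = 7
+#  so each validation pass runs 7 steps and the trailing 1966080 - 7 * 262144 = 131072
+#  tokens are skipped, which is what the warning refers to.)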
+[2025-07-05 08:14:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:14:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:14:48] [Rank 0] step:21/10000 train_time:2284ms step_avg:108.77ms +[2025-07-05 08:14:48] [Rank 0] step:21/10000 train_time:2284ms step_avg:108.77ms +[2025-07-05 08:14:49] [Rank 0] step:41/10000 train_time:3902ms step_avg:95.18ms +[2025-07-05 08:14:49] [Rank 0] step:41/10000 train_time:3902ms step_avg:95.18ms +[2025-07-05 08:14:51] [Rank 0] step:61/10000 train_time:5424ms step_avg:88.91ms +[2025-07-05 08:14:51] [Rank 0] step:61/10000 train_time:5424ms step_avg:88.91ms +[2025-07-05 08:14:52] [Rank 0] step:81/10000 train_time:6887ms step_avg:85.03ms +[2025-07-05 08:14:52] [Rank 0] step:81/10000 train_time:6887ms step_avg:85.03ms +[2025-07-05 08:14:55] [Rank 0] step:101/10000 train_time:9037ms step_avg:89.48ms +[2025-07-05 08:14:55] [Rank 0] step:101/10000 train_time:9037ms step_avg:89.48ms +[2025-07-05 08:14:56] [Rank 0] step:121/10000 train_time:10499ms step_avg:86.77ms +[2025-07-05 08:14:56] [Rank 0] step:121/10000 train_time:10499ms step_avg:86.77ms +[2025-07-05 08:14:57] [Rank 0] step:141/10000 train_time:11965ms step_avg:84.86ms +[2025-07-05 08:14:57] [Rank 0] step:141/10000 train_time:11965ms step_avg:84.86ms +[2025-07-05 08:14:59] [Rank 0] step:161/10000 train_time:13427ms step_avg:83.40ms +[2025-07-05 08:14:59] [Rank 0] step:161/10000 train_time:13427ms step_avg:83.40ms +[2025-07-05 08:15:01] [Rank 0] step:181/10000 train_time:15576ms step_avg:86.05ms +[2025-07-05 08:15:01] [Rank 0] step:181/10000 train_time:15576ms step_avg:86.05ms +[2025-07-05 08:15:03] [Rank 0] step:201/10000 train_time:17124ms step_avg:85.19ms +[2025-07-05 08:15:03] [Rank 0] step:201/10000 train_time:17124ms step_avg:85.19ms +[2025-07-05 08:15:04] [Rank 0] step:221/10000 train_time:18587ms step_avg:84.11ms +[2025-07-05 08:15:04] [Rank 0] step:221/10000 train_time:18587ms step_avg:84.11ms +[2025-07-05 08:15:06] [Rank 0] step:241/10000 train_time:20054ms step_avg:83.21ms +[2025-07-05 08:15:06] [Rank 0] step:241/10000 train_time:20054ms step_avg:83.21ms +[2025-07-05 08:15:07] [Rank 0] step:261/10000 train_time:21522ms step_avg:82.46ms +[2025-07-05 08:15:07] [Rank 0] step:261/10000 train_time:21522ms step_avg:82.46ms +[2025-07-05 08:15:09] [Rank 0] step:281/10000 train_time:23223ms step_avg:82.64ms +[2025-07-05 08:15:09] [Rank 0] step:281/10000 train_time:23223ms step_avg:82.64ms +[2025-07-05 08:15:10] [Rank 0] step:301/10000 train_time:24692ms step_avg:82.03ms +[2025-07-05 08:15:10] [Rank 0] step:301/10000 train_time:24692ms step_avg:82.03ms +[2025-07-05 08:15:12] [Rank 0] step:321/10000 train_time:26159ms step_avg:81.49ms +[2025-07-05 08:15:12] [Rank 0] step:321/10000 train_time:26159ms step_avg:81.49ms +[2025-07-05 08:15:13] [Rank 0] step:341/10000 train_time:27625ms step_avg:81.01ms +[2025-07-05 08:15:13] [Rank 0] step:341/10000 train_time:27625ms step_avg:81.01ms +[2025-07-05 08:15:15] [Rank 0] step:361/10000 train_time:29145ms step_avg:80.73ms +[2025-07-05 08:15:15] [Rank 0] step:361/10000 train_time:29145ms step_avg:80.73ms +[2025-07-05 08:15:17] [Rank 0] step:381/10000 train_time:31230ms step_avg:81.97ms +[2025-07-05 08:15:17] [Rank 0] step:381/10000 train_time:31230ms step_avg:81.97ms +[2025-07-05 08:15:18] [Rank 0] step:401/10000 train_time:32795ms step_avg:81.78ms +[2025-07-05 08:15:18] [Rank 0] step:401/10000 train_time:32795ms step_avg:81.78ms +[2025-07-05 08:15:20] [Rank 0] step:421/10000 train_time:34260ms step_avg:81.38ms 
+[2025-07-05 08:15:20] [Rank 0] step:421/10000 train_time:34260ms step_avg:81.38ms +[2025-07-05 08:15:21] [Rank 0] step:441/10000 train_time:35727ms step_avg:81.01ms +[2025-07-05 08:15:21] [Rank 0] step:441/10000 train_time:35727ms step_avg:81.01ms +[2025-07-05 08:15:23] [Rank 0] step:461/10000 train_time:37852ms step_avg:82.11ms +[2025-07-05 08:15:23] [Rank 0] step:461/10000 train_time:37852ms step_avg:82.11ms +[2025-07-05 08:15:25] [Rank 0] step:481/10000 train_time:39313ms step_avg:81.73ms +[2025-07-05 08:15:25] [Rank 0] step:481/10000 train_time:39313ms step_avg:81.73ms +[2025-07-05 08:15:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:15:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:15:27] [Rank 0] PRINT: step:500/10000 train_loss:2.2951 val_loss:1.3658 train_time:40775ms step_avg:81.55ms +[2025-07-05 08:15:27] [Rank 0] PRINT: step:500/10000 train_loss:2.2951 val_loss:1.3658 train_time:40775ms step_avg:81.55ms +[2025-07-05 08:15:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:15:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ba99dad98884e1ed9d87f224d6229fb249112002 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "6318183b-0a14-40be-9025-3d0af08e3d26", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/training_log_6318183b-0a14-40be-9025-3d0af08e3d26.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/training_log_6318183b-0a14-40be-9025-3d0af08e3d26.txt new file mode 100644 index 0000000000000000000000000000000000000000..c63661bd0e6c393c42dde89f0291509bb36f1430 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/training_log_6318183b-0a14-40be-9025-3d0af08e3d26.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:41:42] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:41:42 2025 --- +[2025-07-05 08:41:42] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:41:42 2025 --- +[2025-07-05 08:41:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:41:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:41:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:41:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:41:42] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:41:42] 
[Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:41:42] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48 +[2025-07-05 08:41:42] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48 +[2025-07-05 08:41:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
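+# --- Editor's illustrative sketch (hedged; not part of the original run script) ---
+# A minimal worked example of the two schedules defined above, assuming this run's
+# Hyperparameters (num_iterations=10000, cooldown_frac=0.8):
+#   get_lr(step): the multiplier stays at 1.0 for the first 20% of training, then
+#   decays linearly toward 0.1, e.g.
+#     step= 1000 -> x=0.10 -> 1.00
+#     step= 6000 -> x=0.60 -> w=0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
+#     step=10000 -> x=1.00 -> 0.10
+#   get_window_size_blocks(step): the attention window starts at 128 tokens (1 block)
+#   at step 0 and grows toward 1728 tokens rounded up to a multiple of 128, e.g.
+#     step=5000 -> 1728*0.5 = 864 -> 896 tokens -> 7 blocks.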
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:41:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
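+# --- Editor's illustrative note (hypothetical values, not taken from this run) ---
+# run_dir_path_str above is built purely from the parsed CLI args, so for example
+# --optimizer_mode 2 --model_parameterization qkvo --adam_lr 0.001 --seed 7 would
+# resolve to .../logs_bios/qa_0704/mode_2_param_qkvo_lr_0.001_seed_7, and the
+# master-process block below rebuilds the same folder name relative to base_log_dir
+# before writing training_log_<uuid>.txt and config.json into it.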
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:41:42] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:41:42] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:41:44] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:41:44] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:41:44] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:41:45] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 08:41:45] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:41:45] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:41:45] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:41:45] [Rank 0] PRINT: Model returns:
+[2025-07-05 08:41:45] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:41:45] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 08:41:45] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 08:41:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 08:41:45] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 08:41:45] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:41:45] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:41:45] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:42:50] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:42:50] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:42:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:42:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:42:59] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.58ms
+[2025-07-05 08:43:00] [Rank 0] step:41/10000 train_time:3213ms step_avg:78.37ms
+[2025-07-05 08:43:02] [Rank 0] step:61/10000 train_time:4676ms step_avg:76.66ms
+[2025-07-05 08:43:03] [Rank 0] step:81/10000 train_time:6142ms step_avg:75.82ms
+[2025-07-05 08:43:05] [Rank 0] step:101/10000 train_time:8270ms step_avg:81.88ms
+[2025-07-05 08:43:07] [Rank 0] step:121/10000 train_time:9732ms step_avg:80.43ms
+[2025-07-05 08:43:08] [Rank 0] step:141/10000 train_time:11196ms step_avg:79.41ms
+[2025-07-05 08:43:10] [Rank 0] step:161/10000 train_time:12664ms step_avg:78.66ms
+[2025-07-05 08:43:12] [Rank 0] step:181/10000 train_time:14383ms step_avg:79.46ms
+[2025-07-05 08:43:13] [Rank 0] step:201/10000 train_time:16235ms step_avg:80.77ms
+[2025-07-05 08:43:15] [Rank 0] step:221/10000 train_time:17701ms step_avg:80.10ms
+[2025-07-05 08:43:16] [Rank 0] step:241/10000 train_time:19168ms step_avg:79.54ms
+[2025-07-05 08:43:18] [Rank 0] step:261/10000 train_time:20636ms step_avg:79.07ms
+[2025-07-05 08:43:19] [Rank 0] step:281/10000 train_time:22340ms step_avg:79.50ms
+[2025-07-05 08:43:21] [Rank 0] step:301/10000 train_time:23811ms step_avg:79.11ms
+[2025-07-05 08:43:22] [Rank 0] step:321/10000 train_time:25278ms step_avg:78.75ms
+[2025-07-05 08:43:24] [Rank 0] step:341/10000 train_time:26747ms step_avg:78.44ms
+[2025-07-05 08:43:26] [Rank 0] step:361/10000 train_time:28268ms step_avg:78.30ms
+[2025-07-05 08:43:27] [Rank 0] step:381/10000 train_time:30348ms step_avg:79.65ms
+[2025-07-05 08:43:29] [Rank 0] step:401/10000 train_time:31814ms step_avg:79.34ms
+[2025-07-05 08:43:30] [Rank 0] step:421/10000 train_time:33281ms step_avg:79.05ms +[2025-07-05 08:43:32] [Rank 0] step:441/10000 train_time:34749ms step_avg:78.80ms +[2025-07-05 08:43:32] [Rank 0] step:441/10000 train_time:34749ms step_avg:78.80ms +[2025-07-05 08:43:33] [Rank 0] step:461/10000 train_time:36454ms step_avg:79.08ms +[2025-07-05 08:43:33] [Rank 0] step:461/10000 train_time:36454ms step_avg:79.08ms +[2025-07-05 08:43:35] [Rank 0] step:481/10000 train_time:37922ms step_avg:78.84ms +[2025-07-05 08:43:35] [Rank 0] step:481/10000 train_time:37922ms step_avg:78.84ms +[2025-07-05 08:43:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:43:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:43:37] [Rank 0] PRINT: step:500/10000 train_loss:2.2939 val_loss:1.3664 train_time:39388ms step_avg:78.78ms +[2025-07-05 08:43:37] [Rank 0] PRINT: step:500/10000 train_loss:2.2939 val_loss:1.3664 train_time:39388ms step_avg:78.78ms +[2025-07-05 08:43:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:43:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a0ec94301acdb8900ba386a1f45ec6ff9db638d6 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "a5dc4b0b-1dd8-423e-9f0e-886df9924c76", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/training_log_a5dc4b0b-1dd8-423e-9f0e-886df9924c76.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/training_log_a5dc4b0b-1dd8-423e-9f0e-886df9924c76.txt new file mode 100644 index 0000000000000000000000000000000000000000..27bb05b28ce9e8110067ec015da124c4714af1ae --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/training_log_a5dc4b0b-1dd8-423e-9f0e-886df9924c76.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:09:10] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:09:10 2025 --- +[2025-07-05 09:09:10] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:09:10 2025 --- +[2025-07-05 09:09:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:09:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:09:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:09:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:09:10] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:09:10] 
[Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:09:10] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49 +[2025-07-05 09:09:10] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49 +[2025-07-05 09:09:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
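+# A minimal worked sketch of the two schedules defined above, assuming the
+# defaults logged for this run (num_iterations=10000, cooldown_frac=0.8):
+#   get_lr(step): the multiplier stays at 1.0 while step/num_iterations < 0.2,
+#   then decays linearly from 1.0 to 0.1 over the remaining 80% of training, e.g.
+#     get_lr(0)     -> 1.0    (flat phase)
+#     get_lr(2000)  -> 1.0    (cooldown begins, w = 1.0)
+#     get_lr(6000)  -> 0.55   (w = 0.5 -> 0.5*1.0 + 0.5*0.1)
+#     get_lr(10000) -> 0.1    (w = 0.0)
+#   get_window_size_blocks(step): the attention window grows monotonically with
+#   progress, from 128 tokens (1 block) at step 0 to 1792 tokens (14 blocks) at
+#   the final step, always rounded up to a multiple of 128.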
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:09:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
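+# (Sketch added for illustration, not part of the original run.) A quick worked
+# example of the get_lr schedule defined above, assuming the logged values
+# num_iterations=10000 and cooldown_frac=0.8: the multiplier holds at 1.0 for
+# the first 20% of training, then decays linearly to 0.1:
+#   get_lr(0)     -> 1.0
+#   get_lr(2000)  -> 1.0   (x = 0.2, start of cooldown, w = 1.0)
+#   get_lr(6000)  -> 0.55  (x = 0.6, w = 0.5)
+#   get_lr(10000) -> 0.1   (x = 1.0, w = 0.0)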
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:09:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:09:10] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:09:12] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:09:12] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:09:12] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:09:13] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 09:09:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:09:13] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:09:13] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:09:13] [Rank 0] PRINT: Model returns: 
+[2025-07-05 09:09:13] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:09:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-05 09:09:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-05 09:09:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-05 09:09:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-05 09:09:13] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:09:13] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:09:13] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:10:19] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:10:19] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:10:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:10:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:10:28] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.43ms
+[2025-07-05 09:10:30] [Rank 0] step:41/10000 train_time:3210ms step_avg:78.28ms
+[2025-07-05 09:10:31] [Rank 0] step:61/10000 train_time:4665ms step_avg:76.47ms
+[2025-07-05 09:10:32] [Rank 0] step:81/10000 train_time:6122ms step_avg:75.57ms
+[2025-07-05 09:10:35] [Rank 0] step:101/10000 train_time:8242ms step_avg:81.60ms
+[2025-07-05 09:10:36] [Rank 0] step:121/10000 train_time:9702ms step_avg:80.19ms
+[2025-07-05 09:10:38] [Rank 0] step:141/10000 train_time:11160ms step_avg:79.15ms
+[2025-07-05 09:10:39] [Rank 0] step:161/10000 train_time:12622ms step_avg:78.40ms
+[2025-07-05 09:10:41] [Rank 0] step:181/10000 train_time:14086ms step_avg:77.82ms
+[2025-07-05 09:10:43] [Rank 0] step:201/10000 train_time:16446ms step_avg:81.82ms
+[2025-07-05 09:10:44] [Rank 0] step:221/10000 train_time:17909ms step_avg:81.04ms
+[2025-07-05 09:10:46] [Rank 0] step:241/10000 train_time:19373ms step_avg:80.38ms
+[2025-07-05 09:10:47] [Rank 0] step:261/10000 train_time:20836ms step_avg:79.83ms
+[2025-07-05 09:10:49] [Rank 0] step:281/10000 train_time:22960ms step_avg:81.71ms
+[2025-07-05 09:10:51] [Rank 0] step:301/10000 train_time:24423ms step_avg:81.14ms
+[2025-07-05 09:10:52] [Rank 0] step:321/10000 train_time:25886ms step_avg:80.64ms
+[2025-07-05 09:10:54] [Rank 0] step:341/10000 train_time:27353ms step_avg:80.21ms
+[2025-07-05 09:10:56] [Rank 0] step:361/10000 train_time:28870ms step_avg:79.97ms
+[2025-07-05 09:10:57] [Rank 0] step:381/10000 train_time:30925ms step_avg:81.17ms
+[2025-07-05 09:10:59] [Rank 0] step:401/10000 train_time:32388ms step_avg:80.77ms
+[2025-07-05 09:11:00] [Rank 0] step:421/10000 train_time:33852ms step_avg:80.41ms +[2025-07-05 09:11:02] [Rank 0] step:441/10000 train_time:35313ms step_avg:80.07ms +[2025-07-05 09:11:02] [Rank 0] step:441/10000 train_time:35313ms step_avg:80.07ms +[2025-07-05 09:11:04] [Rank 0] step:461/10000 train_time:37423ms step_avg:81.18ms +[2025-07-05 09:11:04] [Rank 0] step:461/10000 train_time:37423ms step_avg:81.18ms +[2025-07-05 09:11:05] [Rank 0] step:481/10000 train_time:38886ms step_avg:80.84ms +[2025-07-05 09:11:05] [Rank 0] step:481/10000 train_time:38886ms step_avg:80.84ms +[2025-07-05 09:11:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:11:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:11:08] [Rank 0] PRINT: step:500/10000 train_loss:2.2975 val_loss:1.3691 train_time:40345ms step_avg:80.69ms +[2025-07-05 09:11:08] [Rank 0] PRINT: step:500/10000 train_loss:2.2975 val_loss:1.3691 train_time:40345ms step_avg:80.69ms +[2025-07-05 09:11:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:11:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..987b1296262b9146993177e8f1dfba6d308a8f1b --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "af29783c-2da7-4b37-a113-4eb7c148fb20", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50/training_log_af29783c-2da7-4b37-a113-4eb7c148fb20.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50/training_log_af29783c-2da7-4b37-a113-4eb7c148fb20.txt new file mode 100644 index 0000000000000000000000000000000000000000..08114547445f40261013cb37e5691448ff96d8e9 --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50/training_log_af29783c-2da7-4b37-a113-4eb7c148fb20.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:36:33] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:36:33 2025 --- +[2025-07-05 09:36:33] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:36:33 2025 --- +[2025-07-05 09:36:33] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:36:33] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:36:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:36:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:36:33] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:36:33] 
[Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:36:33] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50 +[2025-07-05 09:36:33] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_50 +[2025-07-05 09:36:33] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
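+# For reference, a minimal standalone sketch of the stable-then-decay schedule
+# implemented by get_lr above, assuming the logged hyperparameters
+# (num_iterations=10000, cooldown_frac=0.8): the multiplier stays at 1.0 for
+# the first 2000 steps, then decays linearly to 0.1 by the final step.
+#
+#     def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
+#         x = min(max(step / num_iterations, 0.0), 1.0)   # clamped progress
+#         if x < 1 - cooldown_frac:                        # stable phase
+#             return 1.0
+#         w = (1 - x) / cooldown_frac                      # ramps 1.0 -> 0.0
+#         return w * 1.0 + (1 - w) * 0.1                   # 1.0x -> 0.1x of base LR
+#
+#     lr_multiplier(2000)   # -> 1.00
+#     lr_multiplier(6000)   # -> 0.55
+#     lr_multiplier(10000)  # -> 0.10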
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:36:33] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
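+# As a quick worked example (assuming the argparse defaults above:
+# optimizer_mode=0, model_parameterization="whole", adam_lr=1e-3, seed=42),
+# the per-run folder name built from this pattern renders as
+#     mode_0_param_whole_lr_0.001_seed_42
+# so each configuration/seed combination gets its own self-describing
+# directory under the base log directory defined just below.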
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:36:33] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:36:33] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:36:33] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:36:33] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:36:35] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:36:35] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:36:35] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:36:35] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:36:35] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:36:35] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:36:36] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:36:36] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:36:36] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:36:36] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:36:36] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:36:36] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:36:36] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:36:36] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:36:36] [Rank 0] PRINT: Model returns: +[2025-07-05 09:36:36] [Rank 0] PRINT: Model returns: +[2025-07-05 09:36:36] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:36:36] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:36:36] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:36:36] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 09:36:36] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:36:36] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 09:36:36] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:36:36] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 09:36:36] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:36:36] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 09:36:36] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:36:36] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:36:36] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:36:36] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:36:36] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:36:36] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:37:45] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:37:45] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:37:45] [Rank 0] PRINT: Starting training... +[2025-07-05 09:37:45] [Rank 0] PRINT: Starting training... +[2025-07-05 09:37:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:37:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:37:53] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:37:53] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:37:55] [Rank 0] step:21/10000 train_time:1890ms step_avg:89.98ms +[2025-07-05 09:37:55] [Rank 0] step:21/10000 train_time:1890ms step_avg:89.98ms +[2025-07-05 09:37:57] [Rank 0] step:41/10000 train_time:3346ms step_avg:81.61ms +[2025-07-05 09:37:57] [Rank 0] step:41/10000 train_time:3346ms step_avg:81.61ms +[2025-07-05 09:37:58] [Rank 0] step:61/10000 train_time:4805ms step_avg:78.77ms +[2025-07-05 09:37:58] [Rank 0] step:61/10000 train_time:4805ms step_avg:78.77ms +[2025-07-05 09:38:00] [Rank 0] step:81/10000 train_time:6266ms step_avg:77.36ms +[2025-07-05 09:38:00] [Rank 0] step:81/10000 train_time:6266ms step_avg:77.36ms +[2025-07-05 09:38:02] [Rank 0] step:101/10000 train_time:8387ms step_avg:83.04ms +[2025-07-05 09:38:02] [Rank 0] step:101/10000 train_time:8387ms step_avg:83.04ms +[2025-07-05 09:38:03] [Rank 0] step:121/10000 train_time:9850ms step_avg:81.40ms +[2025-07-05 09:38:03] [Rank 0] step:121/10000 train_time:9850ms step_avg:81.40ms +[2025-07-05 09:38:05] [Rank 0] step:141/10000 train_time:11312ms step_avg:80.23ms +[2025-07-05 09:38:05] [Rank 0] step:141/10000 train_time:11312ms step_avg:80.23ms +[2025-07-05 09:38:06] [Rank 0] step:161/10000 train_time:12941ms step_avg:80.38ms +[2025-07-05 09:38:06] [Rank 0] step:161/10000 train_time:12941ms step_avg:80.38ms +[2025-07-05 09:38:09] [Rank 0] step:181/10000 train_time:14545ms step_avg:80.36ms +[2025-07-05 09:38:09] [Rank 0] step:181/10000 train_time:14545ms step_avg:80.36ms +[2025-07-05 09:38:10] [Rank 0] step:201/10000 train_time:16605ms step_avg:82.61ms +[2025-07-05 09:38:10] [Rank 0] step:201/10000 train_time:16605ms step_avg:82.61ms +[2025-07-05 09:38:12] [Rank 0] step:221/10000 train_time:18074ms step_avg:81.78ms +[2025-07-05 09:38:12] [Rank 0] step:221/10000 train_time:18074ms step_avg:81.78ms +[2025-07-05 09:38:13] [Rank 0] step:241/10000 train_time:19542ms step_avg:81.09ms +[2025-07-05 09:38:13] [Rank 0] step:241/10000 train_time:19542ms step_avg:81.09ms +[2025-07-05 09:38:15] [Rank 0] step:261/10000 train_time:21006ms step_avg:80.48ms +[2025-07-05 09:38:15] [Rank 0] step:261/10000 train_time:21006ms step_avg:80.48ms +[2025-07-05 09:38:16] [Rank 0] step:281/10000 train_time:22705ms step_avg:80.80ms +[2025-07-05 09:38:16] [Rank 0] step:281/10000 train_time:22705ms step_avg:80.80ms +[2025-07-05 09:38:18] [Rank 0] step:301/10000 train_time:24172ms step_avg:80.30ms +[2025-07-05 09:38:18] [Rank 0] step:301/10000 train_time:24172ms step_avg:80.30ms +[2025-07-05 09:38:19] [Rank 0] step:321/10000 train_time:25636ms step_avg:79.86ms +[2025-07-05 09:38:19] [Rank 0] step:321/10000 train_time:25636ms step_avg:79.86ms +[2025-07-05 09:38:21] [Rank 0] step:341/10000 train_time:27104ms step_avg:79.49ms +[2025-07-05 09:38:21] [Rank 0] step:341/10000 train_time:27104ms step_avg:79.49ms +[2025-07-05 09:38:23] [Rank 0] step:361/10000 train_time:28825ms step_avg:79.85ms +[2025-07-05 09:38:23] [Rank 0] step:361/10000 train_time:28825ms step_avg:79.85ms +[2025-07-05 09:38:24] [Rank 0] step:381/10000 train_time:30685ms step_avg:80.54ms +[2025-07-05 09:38:24] [Rank 0] step:381/10000 train_time:30685ms step_avg:80.54ms +[2025-07-05 09:38:26] [Rank 0] step:401/10000 train_time:32148ms step_avg:80.17ms +[2025-07-05 09:38:26] [Rank 0] step:401/10000 train_time:32148ms step_avg:80.17ms +[2025-07-05 09:38:27] [Rank 0] step:421/10000 train_time:33613ms step_avg:79.84ms 
+[2025-07-05 09:38:27] [Rank 0] step:421/10000 train_time:33613ms step_avg:79.84ms +[2025-07-05 09:38:29] [Rank 0] step:441/10000 train_time:35076ms step_avg:79.54ms +[2025-07-05 09:38:29] [Rank 0] step:441/10000 train_time:35076ms step_avg:79.54ms +[2025-07-05 09:38:31] [Rank 0] step:461/10000 train_time:37212ms step_avg:80.72ms +[2025-07-05 09:38:31] [Rank 0] step:461/10000 train_time:37212ms step_avg:80.72ms +[2025-07-05 09:38:32] [Rank 0] step:481/10000 train_time:38675ms step_avg:80.40ms +[2025-07-05 09:38:32] [Rank 0] step:481/10000 train_time:38675ms step_avg:80.40ms +[2025-07-05 09:38:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:38:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:38:35] [Rank 0] PRINT: step:500/10000 train_loss:2.2947 val_loss:1.3663 train_time:40137ms step_avg:80.27ms +[2025-07-05 09:38:35] [Rank 0] PRINT: step:500/10000 train_loss:2.2947 val_loss:1.3663 train_time:40137ms step_avg:80.27ms +[2025-07-05 09:38:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:38:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51/config.json b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..74a5a9f98025d9502ca434021ca9686b49a6264c --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "b696b9fb-3d99-4f51-a1bd-14b8dd34e28d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51/training_log_b696b9fb-3d99-4f51-a1bd-14b8dd34e28d.txt b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51/training_log_b696b9fb-3d99-4f51-a1bd-14b8dd34e28d.txt new file mode 100644 index 0000000000000000000000000000000000000000..804f6273ec5f810cdc15228e89e79bbd3ebb4faf --- /dev/null +++ b/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51/training_log_b696b9fb-3d99-4f51-a1bd-14b8dd34e28d.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:03:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:03:57 2025 --- +[2025-07-05 10:03:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:03:57 2025 --- +[2025-07-05 10:03:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 10:03:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 10:03:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:03:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:03:57] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:03:57] 
[Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:03:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51 +[2025-07-05 10:03:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_51 +[2025-07-05 10:03:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
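+# --- Editorial illustration (not part of the original run): a minimal, self-contained
+# --- sketch of the get_lr() schedule defined earlier in this script, using this run's
+# --- Hyperparameters (num_iterations=10000, cooldown_frac=0.8). The multiplier stays at
+# --- 1.0 for the first 20% of steps, then decays linearly toward 0.1 at the final step.
+def _illustrate_lr_schedule(num_iterations: int = 10000, cooldown_frac: float = 0.8):
+    """Return {step: lr multiplier} for a few sample steps; mirrors get_lr() above."""
+    samples = {}
+    for s in (0, 2000, 6000, 10000):
+        x = min(max(s / num_iterations, 0.0), 1.0)
+        if x < 1 - cooldown_frac:
+            mult = 1.0
+        else:
+            w = (1 - x) / max(cooldown_frac, 1e-9)
+            mult = w * 1.0 + (1 - w) * 0.1
+        samples[s] = mult  # roughly {0: 1.0, 2000: 1.0, 6000: 0.55, 10000: 0.1}
+    return samples
+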
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:03:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
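+# --- Editorial note (not part of the original run): the folder name above encodes the key
+# --- CLI choices; e.g. with the parser defaults above it would read
+# --- "mode_0_param_whole_lr_0.001_seed_42". A minimal, hypothetical helper for recovering
+# --- those fields when analysing these logs later, assuming the exact f-string layout used above.
+def _parse_run_folder_name(name: str):
+    """Return (optimizer_mode, parameterization, adam_lr, seed) from a run folder name, or None."""
+    m = re.match(r"mode_(\d+)_param_(\w+)_lr_([0-9.eE+-]+)_seed_(\d+)$", name)
+    if m is None:
+        return None
+    mode, param, lr, seed = m.groups()
+    return int(mode), param, float(lr), int(seed)
+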
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
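+# Illustrative note (not part of the original script): train_loss_sum / train_step_count
+# accumulate the per-step training loss (scaled by 1/train_seq_len) between validation
+# points; they are averaged across ranks and reset after each validation report.
+# With the configured num_iterations=10000 and cooldown_frac=0.8, the get_lr schedule
+# defined above evaluates to:
+#   get_lr(0..1999)  -> 1.00   (constant phase, x < 1 - cooldown_frac)
+#   get_lr(6000)     -> 0.55   (linear cooldown: w = 0.5 -> 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000)    -> 0.10   (end of cooldown)
+# and get_window_size_blocks grows the attention window from 1 block (128 tokens)
+# at step 0 to 14 blocks (1792 tokens, the next multiple of 128 >= 1728) at the final step.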
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:03:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:03:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:03:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:03:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:03:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:03:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:03:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:03:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:03:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:03:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:04:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:04:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:04:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:04:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:04:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:04:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:04:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:04:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:04:00] [Rank 0] PRINT: Model returns: +[2025-07-05 10:04:00] [Rank 0] PRINT: Model returns: +[2025-07-05 10:04:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:04:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:04:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:04:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-05 10:04:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:04:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-05 10:04:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:04:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-05 10:04:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:04:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-05 10:04:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:04:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:04:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:04:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:04:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:04:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:05:06] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:05:06] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:05:06] [Rank 0] PRINT: Starting training... +[2025-07-05 10:05:06] [Rank 0] PRINT: Starting training... +[2025-07-05 10:05:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:05:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:05:13] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:05:13] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:05:15] [Rank 0] step:21/10000 train_time:1557ms step_avg:74.13ms +[2025-07-05 10:05:15] [Rank 0] step:21/10000 train_time:1557ms step_avg:74.13ms +[2025-07-05 10:05:16] [Rank 0] step:41/10000 train_time:3017ms step_avg:73.59ms +[2025-07-05 10:05:16] [Rank 0] step:41/10000 train_time:3017ms step_avg:73.59ms +[2025-07-05 10:05:18] [Rank 0] step:61/10000 train_time:4479ms step_avg:73.43ms +[2025-07-05 10:05:18] [Rank 0] step:61/10000 train_time:4479ms step_avg:73.43ms +[2025-07-05 10:05:19] [Rank 0] step:81/10000 train_time:5943ms step_avg:73.37ms +[2025-07-05 10:05:19] [Rank 0] step:81/10000 train_time:5943ms step_avg:73.37ms +[2025-07-05 10:05:21] [Rank 0] step:101/10000 train_time:7647ms step_avg:75.71ms +[2025-07-05 10:05:21] [Rank 0] step:101/10000 train_time:7647ms step_avg:75.71ms +[2025-07-05 10:05:22] [Rank 0] step:121/10000 train_time:9114ms step_avg:75.32ms +[2025-07-05 10:05:22] [Rank 0] step:121/10000 train_time:9114ms step_avg:75.32ms +[2025-07-05 10:05:24] [Rank 0] step:141/10000 train_time:10580ms step_avg:75.04ms +[2025-07-05 10:05:24] [Rank 0] step:141/10000 train_time:10580ms step_avg:75.04ms +[2025-07-05 10:05:25] [Rank 0] step:161/10000 train_time:12047ms step_avg:74.83ms +[2025-07-05 10:05:25] [Rank 0] step:161/10000 train_time:12047ms step_avg:74.83ms +[2025-07-05 10:05:27] [Rank 0] step:181/10000 train_time:14202ms step_avg:78.46ms +[2025-07-05 10:05:27] [Rank 0] step:181/10000 train_time:14202ms step_avg:78.46ms +[2025-07-05 10:05:29] [Rank 0] step:201/10000 train_time:15650ms step_avg:77.86ms +[2025-07-05 10:05:29] [Rank 0] step:201/10000 train_time:15650ms step_avg:77.86ms +[2025-07-05 10:05:30] [Rank 0] step:221/10000 train_time:17120ms step_avg:77.46ms +[2025-07-05 10:05:30] [Rank 0] step:221/10000 train_time:17120ms step_avg:77.46ms +[2025-07-05 10:05:32] [Rank 0] step:241/10000 train_time:18902ms step_avg:78.43ms +[2025-07-05 10:05:32] [Rank 0] step:241/10000 train_time:18902ms step_avg:78.43ms +[2025-07-05 10:05:34] [Rank 0] step:261/10000 train_time:20427ms step_avg:78.26ms +[2025-07-05 10:05:34] [Rank 0] step:261/10000 train_time:20427ms step_avg:78.26ms +[2025-07-05 10:05:36] [Rank 0] step:281/10000 train_time:22561ms step_avg:80.29ms +[2025-07-05 10:05:36] [Rank 0] step:281/10000 train_time:22561ms step_avg:80.29ms +[2025-07-05 10:05:37] [Rank 0] step:301/10000 train_time:24030ms step_avg:79.84ms +[2025-07-05 10:05:37] [Rank 0] step:301/10000 train_time:24030ms step_avg:79.84ms +[2025-07-05 10:05:39] [Rank 0] step:321/10000 train_time:25503ms step_avg:79.45ms +[2025-07-05 10:05:39] [Rank 0] step:321/10000 train_time:25503ms step_avg:79.45ms +[2025-07-05 10:05:40] [Rank 0] step:341/10000 train_time:26973ms step_avg:79.10ms +[2025-07-05 10:05:40] [Rank 0] step:341/10000 train_time:26973ms step_avg:79.10ms +[2025-07-05 10:05:42] [Rank 0] step:361/10000 train_time:28697ms step_avg:79.49ms +[2025-07-05 10:05:42] [Rank 0] step:361/10000 train_time:28697ms step_avg:79.49ms +[2025-07-05 10:05:44] [Rank 0] step:381/10000 train_time:30560ms step_avg:80.21ms +[2025-07-05 10:05:44] [Rank 0] step:381/10000 train_time:30560ms step_avg:80.21ms +[2025-07-05 10:05:45] [Rank 0] step:401/10000 train_time:32028ms step_avg:79.87ms +[2025-07-05 10:05:45] [Rank 0] step:401/10000 train_time:32028ms step_avg:79.87ms +[2025-07-05 10:05:47] [Rank 0] step:421/10000 train_time:33497ms step_avg:79.57ms 
+[2025-07-05 10:05:47] [Rank 0] step:421/10000 train_time:33497ms step_avg:79.57ms +[2025-07-05 10:05:48] [Rank 0] step:441/10000 train_time:34965ms step_avg:79.29ms +[2025-07-05 10:05:48] [Rank 0] step:441/10000 train_time:34965ms step_avg:79.29ms +[2025-07-05 10:05:50] [Rank 0] step:461/10000 train_time:37088ms step_avg:80.45ms +[2025-07-05 10:05:50] [Rank 0] step:461/10000 train_time:37088ms step_avg:80.45ms +[2025-07-05 10:05:52] [Rank 0] step:481/10000 train_time:38655ms step_avg:80.36ms +[2025-07-05 10:05:52] [Rank 0] step:481/10000 train_time:38655ms step_avg:80.36ms +[2025-07-05 10:05:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:05:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:05:54] [Rank 0] PRINT: step:500/10000 train_loss:2.2965 val_loss:1.3691 train_time:40123ms step_avg:80.25ms +[2025-07-05 10:05:54] [Rank 0] PRINT: step:500/10000 train_loss:2.2965 val_loss:1.3691 train_time:40123ms step_avg:80.25ms +[2025-07-05 10:05:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:05:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8958267615a8d216d3af1b96d74dcbcd10e15219 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "879a4ec4-7754-4b3e-88fa-ce021e5b6947", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/training_log_879a4ec4-7754-4b3e-88fa-ce021e5b6947.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/training_log_879a4ec4-7754-4b3e-88fa-ce021e5b6947.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d22cbfb0baa8bfd417d229addd155bc698666ee --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/training_log_879a4ec4-7754-4b3e-88fa-ce021e5b6947.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:30:52] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:30:52 2025 --- +[2025-07-05 08:30:52] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:30:52 2025 --- +[2025-07-05 08:30:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:30:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:30:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:30:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:30:52] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 
08:30:52] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:30:52] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42 +[2025-07-05 08:30:52] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42 +[2025-07-05 08:30:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
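+# --- Editor's note: illustrative sketch only, not part of the original run ---
+# With the logged hyperparameters (num_iterations=10000, cooldown_frac=0.8),
+# get_lr() above holds the multiplier at 1.0 for the first 20% of training and
+# then decays linearly toward 0.1, while get_window_size_blocks() grows the
+# attention window from 1 block (128 tokens) at step 0 to 14 blocks (1792
+# tokens) at the final step. The hypothetical helper below (never called by the
+# script) just prints a few points of those schedules for sanity-checking.
+def _editor_demo_schedules(sample_steps=(0, 2000, 6000, 10000)):
+    for s in sample_steps:
+        lr_mult = get_lr(s)                           # multiplier applied to each group's initial_lr
+        win_blocks = int(get_window_size_blocks(s))   # sliding window size in 128-token blocks
+        print(f"step {s}: lr_mult={lr_mult:.2f}, window_blocks={win_blocks} ({win_blocks * 128} tokens)")
+# Expected output under the assumptions above:
+#   step 0:     lr_mult=1.00, window_blocks=1  (128 tokens)
+#   step 2000:  lr_mult=1.00, window_blocks=3  (384 tokens)
+#   step 6000:  lr_mult=0.55, window_blocks=9  (1152 tokens)
+#   step 10000: lr_mult=0.10, window_blocks=14 (1792 tokens)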
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:30:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
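+# --- Editor's note: illustrative sketch only, not part of the original run ---
+# The _load_data_shard() reader defined earlier expects each *.bin shard to
+# begin with a 256-word int32 header (magic 20240520, version 1, token count)
+# followed by the tokens stored as raw uint16. A minimal, hypothetical writer
+# for a compatible shard could look like this (the file name and token values
+# are placeholders, not data used by this run):
+def _editor_write_example_shard(path="example_shard_000000.bin"):
+    import numpy as np  # numpy is already imported at module level; repeated for self-containment
+    tokens = np.arange(1024, dtype=np.uint16)  # dummy token ids
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520       # magic number checked by _load_data_shard
+    header[1] = 1              # version checked by _load_data_shard
+    header[2] = len(tokens)    # number of uint16 tokens that follow the header
+    with open(path, "wb") as f:
+        f.write(header.tobytes())   # 256 * 4 bytes, matching the reader's f.seek(256 * 4)
+        f.write(tokens.tobytes())   # 2 * num_tokens bytes of uint16 token ids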
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
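# --- Editorial sketch (not part of the logged script): a worked example of the
# stable-then-decay learning-rate schedule implemented by get_lr() above, assuming the
# logged hyperparameters num_iterations=10000 and cooldown_frac=0.8. The multiplier is
# held at 1.0 for the first 20% of training, then decays linearly toward 0.1.
def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)   # clamped training progress, as in get_lr()
    if x < 1 - cooldown_frac:
        return 1.0                                   # stable phase
    w = (1 - x) / max(cooldown_frac, 1e-9)           # linear cooldown weight
    return w * 1.0 + (1 - w) * 0.1                   # blend toward 10% of the base LR
# Example values: step 1000 -> 1.0, step 6000 -> 0.55, step 10000 -> 0.1; each optimizer
# group's lr is then set to initial_lr * multiplier inside the training loop below.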
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:30:52] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:30:52] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:30:52] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:30:52] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:30:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:30:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:30:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:30:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:30:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:30:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:30:55] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:30:55] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:30:55] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:30:55] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:30:55] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:30:55] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:30:55] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:30:55] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:30:55] [Rank 0] PRINT: Model returns: +[2025-07-05 08:30:55] [Rank 0] PRINT: Model returns: +[2025-07-05 08:30:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:30:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:30:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:30:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:30:55] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 08:30:55] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 08:30:55] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:30:55] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:30:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:30:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:30:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:30:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:30:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:30:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:30:55] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:30:55] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:32:00] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:32:00] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:32:01] [Rank 0] PRINT: Starting training... +[2025-07-05 08:32:01] [Rank 0] PRINT: Starting training... +[2025-07-05 08:32:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:32:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
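Editor's note on the warning above (not part of the original log): with val_seq_len = 65536 and the reported val_batch_size of 262144 (which implies a world size of 4), the integer division 1966080 // 262144 = 7 gives seven validation steps per evaluation, so the remaining 1966080 - 7 * 262144 = 131072 tokens (exactly half a batch) are never scored.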
+[2025-07-05 08:32:08] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:32:08] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:32:09] [Rank 0] step:21/10000 train_time:817ms step_avg:38.92ms +[2025-07-05 08:32:09] [Rank 0] step:21/10000 train_time:817ms step_avg:38.92ms +[2025-07-05 08:32:11] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.27ms +[2025-07-05 08:32:11] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.27ms +[2025-07-05 08:32:12] [Rank 0] step:61/10000 train_time:3470ms step_avg:56.88ms +[2025-07-05 08:32:12] [Rank 0] step:61/10000 train_time:3470ms step_avg:56.88ms +[2025-07-05 08:32:13] [Rank 0] step:81/10000 train_time:4798ms step_avg:59.23ms +[2025-07-05 08:32:13] [Rank 0] step:81/10000 train_time:4798ms step_avg:59.23ms +[2025-07-05 08:32:15] [Rank 0] step:101/10000 train_time:6125ms step_avg:60.64ms +[2025-07-05 08:32:15] [Rank 0] step:101/10000 train_time:6125ms step_avg:60.64ms +[2025-07-05 08:32:16] [Rank 0] step:121/10000 train_time:7452ms step_avg:61.58ms +[2025-07-05 08:32:16] [Rank 0] step:121/10000 train_time:7452ms step_avg:61.58ms +[2025-07-05 08:32:17] [Rank 0] step:141/10000 train_time:8777ms step_avg:62.25ms +[2025-07-05 08:32:17] [Rank 0] step:141/10000 train_time:8777ms step_avg:62.25ms +[2025-07-05 08:32:19] [Rank 0] step:161/10000 train_time:10105ms step_avg:62.76ms +[2025-07-05 08:32:19] [Rank 0] step:161/10000 train_time:10105ms step_avg:62.76ms +[2025-07-05 08:32:20] [Rank 0] step:181/10000 train_time:11430ms step_avg:63.15ms +[2025-07-05 08:32:20] [Rank 0] step:181/10000 train_time:11430ms step_avg:63.15ms +[2025-07-05 08:32:21] [Rank 0] step:201/10000 train_time:12819ms step_avg:63.78ms +[2025-07-05 08:32:21] [Rank 0] step:201/10000 train_time:12819ms step_avg:63.78ms +[2025-07-05 08:32:23] [Rank 0] step:221/10000 train_time:14146ms step_avg:64.01ms +[2025-07-05 08:32:23] [Rank 0] step:221/10000 train_time:14146ms step_avg:64.01ms +[2025-07-05 08:32:24] [Rank 0] step:241/10000 train_time:15472ms step_avg:64.20ms +[2025-07-05 08:32:24] [Rank 0] step:241/10000 train_time:15472ms step_avg:64.20ms +[2025-07-05 08:32:25] [Rank 0] step:261/10000 train_time:16800ms step_avg:64.37ms +[2025-07-05 08:32:25] [Rank 0] step:261/10000 train_time:16800ms step_avg:64.37ms +[2025-07-05 08:32:27] [Rank 0] step:281/10000 train_time:18127ms step_avg:64.51ms +[2025-07-05 08:32:27] [Rank 0] step:281/10000 train_time:18127ms step_avg:64.51ms +[2025-07-05 08:32:28] [Rank 0] step:301/10000 train_time:19454ms step_avg:64.63ms +[2025-07-05 08:32:28] [Rank 0] step:301/10000 train_time:19454ms step_avg:64.63ms +[2025-07-05 08:32:29] [Rank 0] step:321/10000 train_time:20781ms step_avg:64.74ms +[2025-07-05 08:32:29] [Rank 0] step:321/10000 train_time:20781ms step_avg:64.74ms +[2025-07-05 08:32:31] [Rank 0] step:341/10000 train_time:22106ms step_avg:64.83ms +[2025-07-05 08:32:31] [Rank 0] step:341/10000 train_time:22106ms step_avg:64.83ms +[2025-07-05 08:32:32] [Rank 0] step:361/10000 train_time:23433ms step_avg:64.91ms +[2025-07-05 08:32:32] [Rank 0] step:361/10000 train_time:23433ms step_avg:64.91ms +[2025-07-05 08:32:33] [Rank 0] step:381/10000 train_time:24815ms step_avg:65.13ms +[2025-07-05 08:32:33] [Rank 0] step:381/10000 train_time:24815ms step_avg:65.13ms +[2025-07-05 08:32:35] [Rank 0] step:401/10000 train_time:26143ms step_avg:65.20ms +[2025-07-05 08:32:35] [Rank 0] step:401/10000 train_time:26143ms step_avg:65.20ms +[2025-07-05 08:32:36] [Rank 0] step:421/10000 train_time:27471ms step_avg:65.25ms 
+[2025-07-05 08:32:36] [Rank 0] step:421/10000 train_time:27471ms step_avg:65.25ms +[2025-07-05 08:32:37] [Rank 0] step:441/10000 train_time:28797ms step_avg:65.30ms +[2025-07-05 08:32:37] [Rank 0] step:441/10000 train_time:28797ms step_avg:65.30ms +[2025-07-05 08:32:39] [Rank 0] step:461/10000 train_time:30123ms step_avg:65.34ms +[2025-07-05 08:32:39] [Rank 0] step:461/10000 train_time:30123ms step_avg:65.34ms +[2025-07-05 08:32:40] [Rank 0] step:481/10000 train_time:31449ms step_avg:65.38ms +[2025-07-05 08:32:40] [Rank 0] step:481/10000 train_time:31449ms step_avg:65.38ms +[2025-07-05 08:32:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:32:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:32:42] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5996 train_time:33378ms step_avg:66.76ms +[2025-07-05 08:32:42] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5996 train_time:33378ms step_avg:66.76ms +[2025-07-05 08:32:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:32:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8bcd7b0a1e7eaa963cbfa61f2caed82bf47fd58a --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "56dab467-9467-4502-a64e-86a5573fdaa5", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43/training_log_56dab467-9467-4502-a64e-86a5573fdaa5.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43/training_log_56dab467-9467-4502-a64e-86a5573fdaa5.txt new file mode 100644 index 0000000000000000000000000000000000000000..be69f9bab513ce1560290f2d4a6f40cdbab7ee7c --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43/training_log_56dab467-9467-4502-a64e-86a5573fdaa5.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:58:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:58:03 2025 --- +[2025-07-05 08:58:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:58:03 2025 --- +[2025-07-05 08:58:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:58:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:58:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:58:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:58:03] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 
08:58:03] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:58:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43 +[2025-07-05 08:58:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_43 +[2025-07-05 08:58:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
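+# Token-budget sketch (illustrative; world_size comes from the torchrun environment, 8 GPUs is
+# only an assumed example): distributed_data_generator is given a global batch of
+# world_size * train_seq_len tokens and yields train_seq_len tokens per rank per step, so with
+# train_seq_len = 12*1024 = 12288 an 8-GPU launch would process 8 * 12288 = 98,304 tokens per
+# step, i.e. roughly 0.98B tokens over the 10,000 iterations configured above.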
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:58:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
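+# Naming sketch (illustrative): the run directory interpolates the CLI arguments, e.g.
+# optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001, seed=42 would map to
+#   .../logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42
+# so every (mode, parameterization, adam_lr, seed) combination logs into its own directory.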
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
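+# A minimal standalone sketch of the stable-then-decay schedule implemented by
+# get_lr above, evaluated at a few steps under this run's settings
+# (num_iterations = 10000, cooldown_frac = 0.8). The helper name lr_multiplier
+# is hypothetical and the snippet is illustrative only; it is not invoked
+# anywhere by the training script:
+#
+#     def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
+#         x = min(max(step / num_iterations, 0.0), 1.0)
+#         if x < 1 - cooldown_frac:
+#             return 1.0
+#         w = (1 - x) / max(cooldown_frac, 1e-9)
+#         return w * 1.0 + (1 - w) * 0.1
+#
+#     # lr_multiplier(0)     -> 1.0   (flat phase: first 20% of training)
+#     # lr_multiplier(6000)  -> 0.55  (halfway through the cooldown)
+#     # lr_multiplier(10000) -> 0.1   (final multiplier)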
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:58:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:58:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:58:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:58:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:58:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:58:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:58:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:58:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:58:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:58:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:58:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:58:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:58:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:58:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:58:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:58:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:58:06] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:58:06] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:58:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:58:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:58:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:58:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:58:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:58:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:58:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 08:58:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 08:58:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:58:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:58:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:58:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:58:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:58:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:58:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:58:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:58:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:58:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:59:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:59:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:59:09] [Rank 0] PRINT: Starting training... +[2025-07-05 08:59:09] [Rank 0] PRINT: Starting training... +[2025-07-05 08:59:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:59:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
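+(The divisibility warning above follows from val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144 tokens, with world_size = 4 inferred from that product; 1966080 / 262144 = 7.5, so each evaluation runs 7 full validation steps and skips roughly 131072 tokens.)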
+[2025-07-05 08:59:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:59:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:59:18] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.66ms +[2025-07-05 08:59:18] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.66ms +[2025-07-05 08:59:19] [Rank 0] step:41/10000 train_time:2372ms step_avg:57.86ms +[2025-07-05 08:59:19] [Rank 0] step:41/10000 train_time:2372ms step_avg:57.86ms +[2025-07-05 08:59:20] [Rank 0] step:61/10000 train_time:3676ms step_avg:60.27ms +[2025-07-05 08:59:20] [Rank 0] step:61/10000 train_time:3676ms step_avg:60.27ms +[2025-07-05 08:59:22] [Rank 0] step:81/10000 train_time:5003ms step_avg:61.76ms +[2025-07-05 08:59:22] [Rank 0] step:81/10000 train_time:5003ms step_avg:61.76ms +[2025-07-05 08:59:23] [Rank 0] step:101/10000 train_time:6330ms step_avg:62.68ms +[2025-07-05 08:59:23] [Rank 0] step:101/10000 train_time:6330ms step_avg:62.68ms +[2025-07-05 08:59:24] [Rank 0] step:121/10000 train_time:7659ms step_avg:63.30ms +[2025-07-05 08:59:24] [Rank 0] step:121/10000 train_time:7659ms step_avg:63.30ms +[2025-07-05 08:59:25] [Rank 0] step:141/10000 train_time:8986ms step_avg:63.73ms +[2025-07-05 08:59:25] [Rank 0] step:141/10000 train_time:8986ms step_avg:63.73ms +[2025-07-05 08:59:27] [Rank 0] step:161/10000 train_time:10315ms step_avg:64.07ms +[2025-07-05 08:59:27] [Rank 0] step:161/10000 train_time:10315ms step_avg:64.07ms +[2025-07-05 08:59:28] [Rank 0] step:181/10000 train_time:12411ms step_avg:68.57ms +[2025-07-05 08:59:28] [Rank 0] step:181/10000 train_time:12411ms step_avg:68.57ms +[2025-07-05 08:59:30] [Rank 0] step:201/10000 train_time:13228ms step_avg:65.81ms +[2025-07-05 08:59:30] [Rank 0] step:201/10000 train_time:13228ms step_avg:65.81ms +[2025-07-05 08:59:31] [Rank 0] step:221/10000 train_time:14556ms step_avg:65.87ms +[2025-07-05 08:59:31] [Rank 0] step:221/10000 train_time:14556ms step_avg:65.87ms +[2025-07-05 08:59:32] [Rank 0] step:241/10000 train_time:15884ms step_avg:65.91ms +[2025-07-05 08:59:32] [Rank 0] step:241/10000 train_time:15884ms step_avg:65.91ms +[2025-07-05 08:59:34] [Rank 0] step:261/10000 train_time:17213ms step_avg:65.95ms +[2025-07-05 08:59:34] [Rank 0] step:261/10000 train_time:17213ms step_avg:65.95ms +[2025-07-05 08:59:35] [Rank 0] step:281/10000 train_time:18543ms step_avg:65.99ms +[2025-07-05 08:59:35] [Rank 0] step:281/10000 train_time:18543ms step_avg:65.99ms +[2025-07-05 08:59:36] [Rank 0] step:301/10000 train_time:19873ms step_avg:66.02ms +[2025-07-05 08:59:36] [Rank 0] step:301/10000 train_time:19873ms step_avg:66.02ms +[2025-07-05 08:59:38] [Rank 0] step:321/10000 train_time:21200ms step_avg:66.04ms +[2025-07-05 08:59:38] [Rank 0] step:321/10000 train_time:21200ms step_avg:66.04ms +[2025-07-05 08:59:39] [Rank 0] step:341/10000 train_time:22528ms step_avg:66.06ms +[2025-07-05 08:59:39] [Rank 0] step:341/10000 train_time:22528ms step_avg:66.06ms +[2025-07-05 08:59:40] [Rank 0] step:361/10000 train_time:24109ms step_avg:66.78ms +[2025-07-05 08:59:40] [Rank 0] step:361/10000 train_time:24109ms step_avg:66.78ms +[2025-07-05 08:59:42] [Rank 0] step:381/10000 train_time:25254ms step_avg:66.28ms +[2025-07-05 08:59:42] [Rank 0] step:381/10000 train_time:25254ms step_avg:66.28ms +[2025-07-05 08:59:43] [Rank 0] step:401/10000 train_time:26583ms step_avg:66.29ms +[2025-07-05 08:59:43] [Rank 0] step:401/10000 train_time:26583ms step_avg:66.29ms +[2025-07-05 08:59:44] [Rank 0] step:421/10000 train_time:27911ms step_avg:66.30ms 
+[2025-07-05 08:59:44] [Rank 0] step:421/10000 train_time:27911ms step_avg:66.30ms +[2025-07-05 08:59:46] [Rank 0] step:441/10000 train_time:29239ms step_avg:66.30ms +[2025-07-05 08:59:46] [Rank 0] step:441/10000 train_time:29239ms step_avg:66.30ms +[2025-07-05 08:59:47] [Rank 0] step:461/10000 train_time:30567ms step_avg:66.31ms +[2025-07-05 08:59:47] [Rank 0] step:461/10000 train_time:30567ms step_avg:66.31ms +[2025-07-05 08:59:48] [Rank 0] step:481/10000 train_time:31894ms step_avg:66.31ms +[2025-07-05 08:59:48] [Rank 0] step:481/10000 train_time:31894ms step_avg:66.31ms +[2025-07-05 08:59:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:59:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:59:51] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33824ms step_avg:67.65ms +[2025-07-05 08:59:51] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33824ms step_avg:67.65ms +[2025-07-05 08:59:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:59:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b6cbe5210b7e907a4b407792048e85886d8d899d --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "daf0fbb3-a899-4e8c-b59d-851e513ec90f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44/training_log_daf0fbb3-a899-4e8c-b59d-851e513ec90f.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44/training_log_daf0fbb3-a899-4e8c-b59d-851e513ec90f.txt new file mode 100644 index 0000000000000000000000000000000000000000..071f374e0bbe64d53ee72b0513719b262f42c6d7 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44/training_log_daf0fbb3-a899-4e8c-b59d-851e513ec90f.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:25:40] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:25:40 2025 --- +[2025-07-05 09:25:40] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:25:40 2025 --- +[2025-07-05 09:25:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:25:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:25:40] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:25:40] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:25:40] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 
09:25:40] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:25:40] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44 +[2025-07-05 09:25:40] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_44 +[2025-07-05 09:25:40] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
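+# A worked illustration of the two schedules defined above, assuming the default Hyperparameters
+# (num_iterations=10000, cooldown_frac=0.8): get_lr(step) returns 1.0 for the first 2000 steps
+# (x < 0.2), then decays linearly, e.g. step 5000 -> w = (1-0.5)/0.8 = 0.625 ->
+# 0.625*1.0 + 0.375*0.1 ~= 0.66, and step 10000 -> 0.1; in the training loop each optimizer
+# group's lr is set to group["initial_lr"] * get_lr(step). get_window_size_blocks grows the
+# attention window from 1 block (128 tokens) at step 0 to 14 blocks (1792 tokens) by the final step.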
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:25:40] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
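+# For illustration: with the argparse defaults above (optimizer_mode=0, model_parameterization="whole",
+# adam_lr=1e-3, seed=42), the final path component of run_dir_path_str expands to
+# "mode_0_param_whole_lr_0.001_seed_42", so every (mode, parameterization, lr, seed) combination
+# gets its own run directory.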
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
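+# Sketch of the evaluation grouping used below, assuming m = 11 as set by M_FOR_POWERLAW:
+# generate_powerlaw_selection_counts(11) (defined above) yields group 0 with a single class of
+# 2**11 = 2048 samples, and for each group g >= 1, 2**(g-1) classes with 2**(11-g) samples each,
+# so every group g >= 1 contributes 2**10 = 1024 samples; in total 2048 classes across 12 groups,
+# with per-class counts decaying from 2048 down to 1.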
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:25:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:25:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:25:41] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:25:41] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:25:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:25:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:25:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:25:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:25:43] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:25:43] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:25:44] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:25:44] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:25:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:25:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:25:44] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:25:44] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:25:44] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:25:44] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:25:44] [Rank 0] PRINT: Model returns: +[2025-07-05 09:25:44] [Rank 0] PRINT: Model returns: +[2025-07-05 09:25:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:25:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:25:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:25:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:25:44] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 09:25:44] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 09:25:44] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:25:44] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:25:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:25:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:25:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:25:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:25:44] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:25:44] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:25:44] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:25:44] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:26:48] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:26:48] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:26:48] [Rank 0] PRINT: Starting training... +[2025-07-05 09:26:48] [Rank 0] PRINT: Starting training... +[2025-07-05 09:26:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:26:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:26:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:26:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:26:56] [Rank 0] step:21/10000 train_time:821ms step_avg:39.11ms +[2025-07-05 09:26:56] [Rank 0] step:21/10000 train_time:821ms step_avg:39.11ms +[2025-07-05 09:26:58] [Rank 0] step:41/10000 train_time:2249ms step_avg:54.85ms +[2025-07-05 09:26:58] [Rank 0] step:41/10000 train_time:2249ms step_avg:54.85ms +[2025-07-05 09:26:59] [Rank 0] step:61/10000 train_time:3573ms step_avg:58.58ms +[2025-07-05 09:26:59] [Rank 0] step:61/10000 train_time:3573ms step_avg:58.58ms +[2025-07-05 09:27:00] [Rank 0] step:81/10000 train_time:4901ms step_avg:60.50ms +[2025-07-05 09:27:00] [Rank 0] step:81/10000 train_time:4901ms step_avg:60.50ms +[2025-07-05 09:27:02] [Rank 0] step:101/10000 train_time:6228ms step_avg:61.66ms +[2025-07-05 09:27:02] [Rank 0] step:101/10000 train_time:6228ms step_avg:61.66ms +[2025-07-05 09:27:03] [Rank 0] step:121/10000 train_time:7555ms step_avg:62.44ms +[2025-07-05 09:27:03] [Rank 0] step:121/10000 train_time:7555ms step_avg:62.44ms +[2025-07-05 09:27:04] [Rank 0] step:141/10000 train_time:8984ms step_avg:63.72ms +[2025-07-05 09:27:04] [Rank 0] step:141/10000 train_time:8984ms step_avg:63.72ms +[2025-07-05 09:27:06] [Rank 0] step:161/10000 train_time:10312ms step_avg:64.05ms +[2025-07-05 09:27:06] [Rank 0] step:161/10000 train_time:10312ms step_avg:64.05ms +[2025-07-05 09:27:07] [Rank 0] step:181/10000 train_time:11640ms step_avg:64.31ms +[2025-07-05 09:27:07] [Rank 0] step:181/10000 train_time:11640ms step_avg:64.31ms +[2025-07-05 09:27:09] [Rank 0] step:201/10000 train_time:13121ms step_avg:65.28ms +[2025-07-05 09:27:09] [Rank 0] step:201/10000 train_time:13121ms step_avg:65.28ms +[2025-07-05 09:27:10] [Rank 0] step:221/10000 train_time:14449ms step_avg:65.38ms +[2025-07-05 09:27:10] [Rank 0] step:221/10000 train_time:14449ms step_avg:65.38ms +[2025-07-05 09:27:11] [Rank 0] step:241/10000 train_time:15778ms step_avg:65.47ms +[2025-07-05 09:27:11] [Rank 0] step:241/10000 train_time:15778ms step_avg:65.47ms +[2025-07-05 09:27:13] [Rank 0] step:261/10000 train_time:17114ms step_avg:65.57ms +[2025-07-05 09:27:13] [Rank 0] step:261/10000 train_time:17114ms step_avg:65.57ms +[2025-07-05 09:27:14] [Rank 0] step:281/10000 train_time:18445ms step_avg:65.64ms +[2025-07-05 09:27:14] [Rank 0] step:281/10000 train_time:18445ms step_avg:65.64ms +[2025-07-05 09:27:15] [Rank 0] step:301/10000 train_time:19774ms step_avg:65.70ms +[2025-07-05 09:27:15] [Rank 0] step:301/10000 train_time:19774ms step_avg:65.70ms +[2025-07-05 09:27:17] [Rank 0] step:321/10000 train_time:21101ms step_avg:65.74ms +[2025-07-05 09:27:17] [Rank 0] step:321/10000 train_time:21101ms step_avg:65.74ms +[2025-07-05 09:27:18] [Rank 0] step:341/10000 train_time:22430ms step_avg:65.78ms +[2025-07-05 09:27:18] [Rank 0] step:341/10000 train_time:22430ms step_avg:65.78ms +[2025-07-05 09:27:19] [Rank 0] step:361/10000 train_time:23804ms step_avg:65.94ms +[2025-07-05 09:27:19] [Rank 0] step:361/10000 train_time:23804ms step_avg:65.94ms +[2025-07-05 09:27:21] [Rank 0] step:381/10000 train_time:25145ms step_avg:66.00ms +[2025-07-05 09:27:21] [Rank 0] step:381/10000 train_time:25145ms step_avg:66.00ms +[2025-07-05 09:27:22] [Rank 0] step:401/10000 train_time:26476ms step_avg:66.02ms +[2025-07-05 09:27:22] [Rank 0] step:401/10000 train_time:26476ms step_avg:66.02ms +[2025-07-05 09:27:23] [Rank 0] step:421/10000 train_time:27804ms step_avg:66.04ms 
+[2025-07-05 09:27:23] [Rank 0] step:421/10000 train_time:27804ms step_avg:66.04ms +[2025-07-05 09:27:25] [Rank 0] step:441/10000 train_time:29136ms step_avg:66.07ms +[2025-07-05 09:27:25] [Rank 0] step:441/10000 train_time:29136ms step_avg:66.07ms +[2025-07-05 09:27:26] [Rank 0] step:461/10000 train_time:30465ms step_avg:66.08ms +[2025-07-05 09:27:26] [Rank 0] step:461/10000 train_time:30465ms step_avg:66.08ms +[2025-07-05 09:27:27] [Rank 0] step:481/10000 train_time:31796ms step_avg:66.10ms +[2025-07-05 09:27:27] [Rank 0] step:481/10000 train_time:31796ms step_avg:66.10ms +[2025-07-05 09:27:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:27:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:27:29] [Rank 0] PRINT: step:500/10000 train_loss:9.6505 val_loss:8.5998 train_time:33732ms step_avg:67.46ms +[2025-07-05 09:27:29] [Rank 0] PRINT: step:500/10000 train_loss:9.6505 val_loss:8.5998 train_time:33732ms step_avg:67.46ms +[2025-07-05 09:27:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:27:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1682f4fcc6c502f65a25d5f3b4c7b40fdcc80a0d --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "acc5912b-a67c-4ba5-804b-f8e2955dc0a9", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/training_log_acc5912b-a67c-4ba5-804b-f8e2955dc0a9.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/training_log_acc5912b-a67c-4ba5-804b-f8e2955dc0a9.txt new file mode 100644 index 0000000000000000000000000000000000000000..f67e2211b1608211b0e2429bffee1be0db1a1176 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/training_log_acc5912b-a67c-4ba5-804b-f8e2955dc0a9.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:52:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:52:43 2025 --- +[2025-07-05 09:52:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:52:43 2025 --- +[2025-07-05 09:52:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:52:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:52:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:52:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:52:43] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 
09:52:43] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:52:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45 +[2025-07-05 09:52:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45 +[2025-07-05 09:52:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
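+# For reference: the training loop below rescales each param group's lr by
+# get_lr(step) every step. With num_iterations=10000 and cooldown_frac=0.8 the
+# multiplier stays at 1.0 for the first 2000 steps, then decays linearly:
+#   get_lr(0) == get_lr(2000) == 1.0
+#   get_lr(6000) == 0.55
+#   get_lr(10000) == 0.1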
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:52:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
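+# For reference: with the argparse defaults above (optimizer_mode=0,
+# model_parameterization="whole", adam_lr=1e-3, seed=42) the f-string expands to
+# ".../logs_bios/qa_0704/mode_0_param_whole_lr_0.001_seed_42"; other CLI
+# settings substitute into the same pattern.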
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
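+# For reference: each forward call below receives get_window_size_blocks(step),
+# so the sliding attention window widens with training progress. With
+# num_iterations=10000 the window is 128 tokens (1 block) at step 0, 896 tokens
+# (7 blocks) at step 5000, and 1792 tokens (14 blocks, the next multiple of 128
+# at or above 1728) at the final step.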
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:52:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:52:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:52:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:52:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:52:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:52:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:52:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:52:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:52:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:52:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:52:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:52:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:52:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:52:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:52:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:52:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:52:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:52:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:52:46] [Rank 0] PRINT: Model returns: +[2025-07-05 09:52:46] [Rank 0] PRINT: Model returns: +[2025-07-05 09:52:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:52:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:52:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:52:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:52:46] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 09:52:46] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 09:52:46] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:52:46] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:52:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:52:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:52:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:52:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:52:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:52:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:52:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:52:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:53:51] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:53:51] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:53:51] [Rank 0] PRINT: Starting training... +[2025-07-05 09:53:51] [Rank 0] PRINT: Starting training... +[2025-07-05 09:53:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:53:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:53:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:53:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:54:01] [Rank 0] step:21/10000 train_time:1203ms step_avg:57.30ms +[2025-07-05 09:54:01] [Rank 0] step:21/10000 train_time:1203ms step_avg:57.30ms +[2025-07-05 09:54:02] [Rank 0] step:41/10000 train_time:2631ms step_avg:64.18ms +[2025-07-05 09:54:02] [Rank 0] step:41/10000 train_time:2631ms step_avg:64.18ms +[2025-07-05 09:54:04] [Rank 0] step:61/10000 train_time:3960ms step_avg:64.91ms +[2025-07-05 09:54:04] [Rank 0] step:61/10000 train_time:3960ms step_avg:64.91ms +[2025-07-05 09:54:05] [Rank 0] step:81/10000 train_time:5288ms step_avg:65.28ms +[2025-07-05 09:54:05] [Rank 0] step:81/10000 train_time:5288ms step_avg:65.28ms +[2025-07-05 09:54:06] [Rank 0] step:101/10000 train_time:6615ms step_avg:65.49ms +[2025-07-05 09:54:06] [Rank 0] step:101/10000 train_time:6615ms step_avg:65.49ms +[2025-07-05 09:54:08] [Rank 0] step:121/10000 train_time:8087ms step_avg:66.84ms +[2025-07-05 09:54:08] [Rank 0] step:121/10000 train_time:8087ms step_avg:66.84ms +[2025-07-05 09:54:09] [Rank 0] step:141/10000 train_time:9270ms step_avg:65.74ms +[2025-07-05 09:54:09] [Rank 0] step:141/10000 train_time:9270ms step_avg:65.74ms +[2025-07-05 09:54:10] [Rank 0] step:161/10000 train_time:10598ms step_avg:65.82ms +[2025-07-05 09:54:10] [Rank 0] step:161/10000 train_time:10598ms step_avg:65.82ms +[2025-07-05 09:54:12] [Rank 0] step:181/10000 train_time:11974ms step_avg:66.15ms +[2025-07-05 09:54:12] [Rank 0] step:181/10000 train_time:11974ms step_avg:66.15ms +[2025-07-05 09:54:13] [Rank 0] step:201/10000 train_time:13298ms step_avg:66.16ms +[2025-07-05 09:54:13] [Rank 0] step:201/10000 train_time:13298ms step_avg:66.16ms +[2025-07-05 09:54:14] [Rank 0] step:221/10000 train_time:14627ms step_avg:66.18ms +[2025-07-05 09:54:14] [Rank 0] step:221/10000 train_time:14627ms step_avg:66.18ms +[2025-07-05 09:54:16] [Rank 0] step:241/10000 train_time:15955ms step_avg:66.20ms +[2025-07-05 09:54:16] [Rank 0] step:241/10000 train_time:15955ms step_avg:66.20ms +[2025-07-05 09:54:17] [Rank 0] step:261/10000 train_time:17285ms step_avg:66.23ms +[2025-07-05 09:54:17] [Rank 0] step:261/10000 train_time:17285ms step_avg:66.23ms +[2025-07-05 09:54:18] [Rank 0] step:281/10000 train_time:18614ms step_avg:66.24ms +[2025-07-05 09:54:18] [Rank 0] step:281/10000 train_time:18614ms step_avg:66.24ms +[2025-07-05 09:54:20] [Rank 0] step:301/10000 train_time:19944ms step_avg:66.26ms +[2025-07-05 09:54:20] [Rank 0] step:301/10000 train_time:19944ms step_avg:66.26ms +[2025-07-05 09:54:21] [Rank 0] step:321/10000 train_time:21274ms step_avg:66.27ms +[2025-07-05 09:54:21] [Rank 0] step:321/10000 train_time:21274ms step_avg:66.27ms +[2025-07-05 09:54:22] [Rank 0] step:341/10000 train_time:22603ms step_avg:66.28ms +[2025-07-05 09:54:22] [Rank 0] step:341/10000 train_time:22603ms step_avg:66.28ms +[2025-07-05 09:54:24] [Rank 0] step:361/10000 train_time:24185ms step_avg:67.00ms +[2025-07-05 09:54:24] [Rank 0] step:361/10000 train_time:24185ms step_avg:67.00ms +[2025-07-05 09:54:25] [Rank 0] step:381/10000 train_time:25260ms step_avg:66.30ms +[2025-07-05 09:54:25] [Rank 0] step:381/10000 train_time:25260ms step_avg:66.30ms +[2025-07-05 09:54:26] [Rank 0] step:401/10000 train_time:26591ms step_avg:66.31ms +[2025-07-05 09:54:26] [Rank 0] step:401/10000 train_time:26591ms step_avg:66.31ms +[2025-07-05 09:54:28] [Rank 0] step:421/10000 train_time:27918ms step_avg:66.31ms 
+[2025-07-05 09:54:28] [Rank 0] step:421/10000 train_time:27918ms step_avg:66.31ms +[2025-07-05 09:54:29] [Rank 0] step:441/10000 train_time:29245ms step_avg:66.32ms +[2025-07-05 09:54:29] [Rank 0] step:441/10000 train_time:29245ms step_avg:66.32ms +[2025-07-05 09:54:30] [Rank 0] step:461/10000 train_time:30572ms step_avg:66.32ms +[2025-07-05 09:54:30] [Rank 0] step:461/10000 train_time:30572ms step_avg:66.32ms +[2025-07-05 09:54:32] [Rank 0] step:481/10000 train_time:31901ms step_avg:66.32ms +[2025-07-05 09:54:32] [Rank 0] step:481/10000 train_time:31901ms step_avg:66.32ms +[2025-07-05 09:54:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:54:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:54:34] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33832ms step_avg:67.66ms +[2025-07-05 09:54:34] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33832ms step_avg:67.66ms +[2025-07-05 09:54:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:54:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7be2a1ce954536a50a9568e11500e72600034efa --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "49f68c02-93d6-41fa-b325-9ff4a09940ff", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46/training_log_49f68c02-93d6-41fa-b325-9ff4a09940ff.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46/training_log_49f68c02-93d6-41fa-b325-9ff4a09940ff.txt new file mode 100644 index 0000000000000000000000000000000000000000..f8ed77bd4a4cb2466bf584dbd394f198c6a1395a --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46/training_log_49f68c02-93d6-41fa-b325-9ff4a09940ff.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:20:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:20:03 2025 --- +[2025-07-05 10:20:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:20:03 2025 --- +[2025-07-05 10:20:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:20:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:20:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:20:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:20:03] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 
10:20:03] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:20:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46 +[2025-07-05 10:20:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_46 +[2025-07-05 10:20:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:20:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
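+# [Editor's note] RANK / LOCAL_RANK / WORLD_SIZE above are read from the environment as
+# set by a torch.distributed launcher; the defaults (0, 0, 1) allow a plain single-GPU
+# run. A hypothetical single-node launch (script name and argument values are
+# placeholders, not recorded in this log) could look like:
+#   torchrun --standalone --nproc_per_node=1 train_script.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42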
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
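+# [Editor's sketch] The attention-window schedule defined above grows the FlexAttention
+# sliding window from 128 tokens at step 0 to 1792 tokens at the end of training, in
+# 128-token (one-block) increments; with num_iterations=10000 this gives, for example,
+# get_window_size_blocks(0) -> 1 block (128 tokens), get_window_size_blocks(5000) ->
+# 7 blocks (896 tokens), and get_window_size_blocks(10000) -> 14 blocks (1792 tokens).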
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:20:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:20:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:20:03] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:20:03] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:20:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:20:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:20:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:20:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:20:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:20:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:20:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:20:06] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:20:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:20:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:20:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:20:06] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:20:06] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:20:06] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:20:06] [Rank 0] PRINT: Model returns: +[2025-07-05 10:20:06] [Rank 0] PRINT: Model returns: +[2025-07-05 10:20:06] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:20:06] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:20:06] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:20:06] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:20:06] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 10:20:06] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 10:20:06] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:20:06] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:20:06] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:20:06] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:20:06] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:20:06] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:20:06] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:20:06] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:20:06] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:20:06] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:21:10] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:21:10] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:21:11] [Rank 0] PRINT: Starting training... +[2025-07-05 10:21:11] [Rank 0] PRINT: Starting training... +[2025-07-05 10:21:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:21:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:21:18] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:21:18] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:21:19] [Rank 0] step:21/10000 train_time:817ms step_avg:38.90ms +[2025-07-05 10:21:19] [Rank 0] step:21/10000 train_time:817ms step_avg:38.90ms +[2025-07-05 10:21:21] [Rank 0] step:41/10000 train_time:2141ms step_avg:52.21ms +[2025-07-05 10:21:21] [Rank 0] step:41/10000 train_time:2141ms step_avg:52.21ms +[2025-07-05 10:21:22] [Rank 0] step:61/10000 train_time:3466ms step_avg:56.82ms +[2025-07-05 10:21:22] [Rank 0] step:61/10000 train_time:3466ms step_avg:56.82ms +[2025-07-05 10:21:23] [Rank 0] step:81/10000 train_time:4792ms step_avg:59.16ms +[2025-07-05 10:21:23] [Rank 0] step:81/10000 train_time:4792ms step_avg:59.16ms +[2025-07-05 10:21:25] [Rank 0] step:101/10000 train_time:6119ms step_avg:60.59ms +[2025-07-05 10:21:25] [Rank 0] step:101/10000 train_time:6119ms step_avg:60.59ms +[2025-07-05 10:21:26] [Rank 0] step:121/10000 train_time:7447ms step_avg:61.54ms +[2025-07-05 10:21:26] [Rank 0] step:121/10000 train_time:7447ms step_avg:61.54ms +[2025-07-05 10:21:27] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 10:21:27] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 10:21:29] [Rank 0] step:161/10000 train_time:10100ms step_avg:62.73ms +[2025-07-05 10:21:29] [Rank 0] step:161/10000 train_time:10100ms step_avg:62.73ms +[2025-07-05 10:21:30] [Rank 0] step:181/10000 train_time:11427ms step_avg:63.13ms +[2025-07-05 10:21:30] [Rank 0] step:181/10000 train_time:11427ms step_avg:63.13ms +[2025-07-05 10:21:31] [Rank 0] step:201/10000 train_time:12753ms step_avg:63.45ms +[2025-07-05 10:21:31] [Rank 0] step:201/10000 train_time:12753ms step_avg:63.45ms +[2025-07-05 10:21:33] [Rank 0] step:221/10000 train_time:14088ms step_avg:63.75ms +[2025-07-05 10:21:33] [Rank 0] step:221/10000 train_time:14088ms step_avg:63.75ms +[2025-07-05 10:21:34] [Rank 0] step:241/10000 train_time:15415ms step_avg:63.96ms +[2025-07-05 10:21:34] [Rank 0] step:241/10000 train_time:15415ms step_avg:63.96ms +[2025-07-05 10:21:35] [Rank 0] step:261/10000 train_time:16768ms step_avg:64.24ms +[2025-07-05 10:21:35] [Rank 0] step:261/10000 train_time:16768ms step_avg:64.24ms +[2025-07-05 10:21:37] [Rank 0] step:281/10000 train_time:18096ms step_avg:64.40ms +[2025-07-05 10:21:37] [Rank 0] step:281/10000 train_time:18096ms step_avg:64.40ms +[2025-07-05 10:21:38] [Rank 0] step:301/10000 train_time:19424ms step_avg:64.53ms +[2025-07-05 10:21:38] [Rank 0] step:301/10000 train_time:19424ms step_avg:64.53ms +[2025-07-05 10:21:39] [Rank 0] step:321/10000 train_time:20752ms step_avg:64.65ms +[2025-07-05 10:21:39] [Rank 0] step:321/10000 train_time:20752ms step_avg:64.65ms +[2025-07-05 10:21:41] [Rank 0] step:341/10000 train_time:22079ms step_avg:64.75ms +[2025-07-05 10:21:41] [Rank 0] step:341/10000 train_time:22079ms step_avg:64.75ms +[2025-07-05 10:21:42] [Rank 0] step:361/10000 train_time:23408ms step_avg:64.84ms +[2025-07-05 10:21:42] [Rank 0] step:361/10000 train_time:23408ms step_avg:64.84ms +[2025-07-05 10:21:43] [Rank 0] step:381/10000 train_time:24736ms step_avg:64.92ms +[2025-07-05 10:21:43] [Rank 0] step:381/10000 train_time:24736ms step_avg:64.92ms +[2025-07-05 10:21:45] [Rank 0] step:401/10000 train_time:26064ms step_avg:65.00ms +[2025-07-05 10:21:45] [Rank 0] step:401/10000 train_time:26064ms step_avg:65.00ms +[2025-07-05 10:21:46] [Rank 0] step:421/10000 train_time:27394ms step_avg:65.07ms 
+[2025-07-05 10:21:46] [Rank 0] step:421/10000 train_time:27394ms step_avg:65.07ms +[2025-07-05 10:21:47] [Rank 0] step:441/10000 train_time:28721ms step_avg:65.13ms +[2025-07-05 10:21:47] [Rank 0] step:441/10000 train_time:28721ms step_avg:65.13ms +[2025-07-05 10:21:49] [Rank 0] step:461/10000 train_time:30049ms step_avg:65.18ms +[2025-07-05 10:21:49] [Rank 0] step:461/10000 train_time:30049ms step_avg:65.18ms +[2025-07-05 10:21:50] [Rank 0] step:481/10000 train_time:31378ms step_avg:65.23ms +[2025-07-05 10:21:50] [Rank 0] step:481/10000 train_time:31378ms step_avg:65.23ms +[2025-07-05 10:21:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:21:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:21:52] [Rank 0] PRINT: step:500/10000 train_loss:9.6507 val_loss:8.6000 train_time:33309ms step_avg:66.62ms +[2025-07-05 10:21:52] [Rank 0] PRINT: step:500/10000 train_loss:9.6507 val_loss:8.6000 train_time:33309ms step_avg:66.62ms +[2025-07-05 10:21:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:21:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..529746ca987569aab1d1213314e2cff8a9f2ba25 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "fdbd75a2-0c85-4a04-bfe6-681cf15f4349", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47/training_log_fdbd75a2-0c85-4a04-bfe6-681cf15f4349.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47/training_log_fdbd75a2-0c85-4a04-bfe6-681cf15f4349.txt new file mode 100644 index 0000000000000000000000000000000000000000..3dc506706dd2d6fa54af89b16572c48565d540b5 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47/training_log_fdbd75a2-0c85-4a04-bfe6-681cf15f4349.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:39:27] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:39:27 2025 --- +[2025-07-05 08:39:27] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:39:27 2025 --- +[2025-07-05 08:39:27] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:39:27] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 08:39:27] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:39:27] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:39:27] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 
08:39:27] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:39:27] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47 +[2025-07-05 08:39:27] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_47 +[2025-07-05 08:39:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to this run's log file (once per call)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
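+# train_loss_sum and train_step_count (below) accumulate the per-step training
+# loss between validation points; at each validation step their ratio is
+# all-reduced across ranks to report train_loss, then both are reset to zero.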
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
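+# Example launch for a run like the one logged below (hypothetical script filename; the flags are the
+# argparse options defined above, the world size of 4 is inferred from the val_batch_size warning in
+# the log, and RANK/LOCAL_RANK/WORLD_SIZE are supplied by torchrun):
+#   torchrun --standalone --nproc_per_node=4 train_gpt_bios.py \
+#       --optimizer_mode 5 --model_parameterization qkvo --adam_lr 0.0001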
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:39:28] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:39:28] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:39:30] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:39:30] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:39:30] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:39:30] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 08:39:30] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:39:30] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:39:30] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:39:31] [Rank 0] PRINT: Model returns:
+[2025-07-05 08:39:31] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:39:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 08:39:31] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001).
+[2025-07-05 08:39:31] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 08:39:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 08:39:31] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:39:31] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:39:31] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:40:36] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:40:36] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:40:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:40:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:40:45] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.64ms
+[2025-07-05 08:40:46] [Rank 0] step:41/10000 train_time:2345ms step_avg:57.19ms
+[2025-07-05 08:40:48] [Rank 0] step:61/10000 train_time:3668ms step_avg:60.13ms
+[2025-07-05 08:40:49] [Rank 0] step:81/10000 train_time:4993ms step_avg:61.64ms
+[2025-07-05 08:40:50] [Rank 0] step:101/10000 train_time:6317ms step_avg:62.54ms
+[2025-07-05 08:40:52] [Rank 0] step:121/10000 train_time:7641ms step_avg:63.15ms
+[2025-07-05 08:40:53] [Rank 0] step:141/10000 train_time:8965ms step_avg:63.58ms
+[2025-07-05 08:40:54] [Rank 0] step:161/10000 train_time:10289ms step_avg:63.91ms
+[2025-07-05 08:40:56] [Rank 0] step:181/10000 train_time:11664ms step_avg:64.44ms
+[2025-07-05 08:40:57] [Rank 0] step:201/10000 train_time:12988ms step_avg:64.61ms
+[2025-07-05 08:40:58] [Rank 0] step:221/10000 train_time:14312ms step_avg:64.76ms
+[2025-07-05 08:41:00] [Rank 0] step:241/10000 train_time:15638ms step_avg:64.89ms
+[2025-07-05 08:41:01] [Rank 0] step:261/10000 train_time:16964ms step_avg:65.00ms
+[2025-07-05 08:41:02] [Rank 0] step:281/10000 train_time:18292ms step_avg:65.09ms
+[2025-07-05 08:41:04] [Rank 0] step:301/10000 train_time:19618ms step_avg:65.18ms
+[2025-07-05 08:41:05] [Rank 0] step:321/10000 train_time:20945ms step_avg:65.25ms
+[2025-07-05 08:41:06] [Rank 0] step:341/10000 train_time:22271ms step_avg:65.31ms
+[2025-07-05 08:41:08] [Rank 0] step:361/10000 train_time:23649ms step_avg:65.51ms
+[2025-07-05 08:41:09] [Rank 0] step:381/10000 train_time:24987ms step_avg:65.58ms
+[2025-07-05 08:41:10] [Rank 0] step:401/10000 train_time:26316ms step_avg:65.63ms
+[2025-07-05 08:41:12] [Rank 0] step:421/10000 train_time:27643ms step_avg:65.66ms
+[2025-07-05 08:41:13] [Rank 0] step:441/10000 train_time:29025ms step_avg:65.82ms
+[2025-07-05 08:41:14] [Rank 0] step:461/10000 train_time:30352ms step_avg:65.84ms
+[2025-07-05 08:41:16] [Rank 0] step:481/10000 train_time:31680ms step_avg:65.86ms
+[2025-07-05 08:41:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:41:18] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33613ms step_avg:67.23ms
+[2025-07-05 08:41:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..65ccdd59d6f425bfe7a55770dddc4f5f99a64280
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 48,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "4896981b-936c-4b0b-96ea-f86f50e80639",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/training_log_4896981b-936c-4b0b-96ea-f86f50e80639.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/training_log_4896981b-936c-4b0b-96ea-f86f50e80639.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9f8d143b7a771012d6f97293d5a19ced31aa4611
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/training_log_4896981b-936c-4b0b-96ea-f86f50e80639.txt
@@ -0,0 +1,2662 @@
+[2025-07-05 09:06:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:06:57 2025 ---
+[2025-07-05 09:06:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001)
+[2025-07-05 09:06:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-05 
09:06:57] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 09:06:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48 +[2025-07-05 09:06:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48 +[2025-07-05 09:06:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:06:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
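+# Illustrative worked example (added for clarity; not part of the original run):
+# the hyperparameters above imply the following validation batching, assuming
+# world_size = 4, which is inferred from the logged val_batch_size further below
+# (262144 tokens with val_seq_len = 65536):
+#   val_batch_size = world_size * val_seq_len = 4 * 65536  = 262144 tokens/step
+#   full val steps = val_tokens // val_batch_size = 1966080 // 262144 = 7
+#   leftover       = 1966080 - 7 * 262144 = 131072 tokens skipped per pass,
+# which is what the "not perfectly divisible" warning in the log refers to.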
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
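+# Illustrative worked example of the stable-then-decay schedule defined in
+# get_lr() above (assuming this run's num_iterations = 10000, cooldown_frac = 0.8):
+#   get_lr(0)     -> 1.0   (x < 0.2: constant phase)
+#   get_lr(2000)  -> 1.0   (w = 0.8/0.8 = 1.0)
+#   get_lr(6000)  -> 0.55  (w = 0.5: 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) -> 0.1   (w = 0: decay floor)
+# The returned multiplier rescales each param group's "initial_lr" in the loop below.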
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:06:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:06:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:06:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:06:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:06:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:06:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:06:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:06:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:06:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:06:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:07:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:07:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:07:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:07:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:07:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:07:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:07:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:07:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:07:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:07:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:07:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:07:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:07:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:07:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:07:00] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 09:07:00] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 09:07:00] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:07:00] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:07:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:07:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:07:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:07:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:07:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:07:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:07:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:07:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:08:04] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:08:04] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:08:04] [Rank 0] PRINT: Starting training... +[2025-07-05 09:08:04] [Rank 0] PRINT: Starting training... +[2025-07-05 09:08:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:08:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:08:11] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:08:11] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:08:13] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.63ms +[2025-07-05 09:08:13] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.63ms +[2025-07-05 09:08:14] [Rank 0] step:41/10000 train_time:2349ms step_avg:57.29ms +[2025-07-05 09:08:14] [Rank 0] step:41/10000 train_time:2349ms step_avg:57.29ms +[2025-07-05 09:08:16] [Rank 0] step:61/10000 train_time:3678ms step_avg:60.29ms +[2025-07-05 09:08:16] [Rank 0] step:61/10000 train_time:3678ms step_avg:60.29ms +[2025-07-05 09:08:17] [Rank 0] step:81/10000 train_time:5007ms step_avg:61.81ms +[2025-07-05 09:08:17] [Rank 0] step:81/10000 train_time:5007ms step_avg:61.81ms +[2025-07-05 09:08:18] [Rank 0] step:101/10000 train_time:6340ms step_avg:62.78ms +[2025-07-05 09:08:18] [Rank 0] step:101/10000 train_time:6340ms step_avg:62.78ms +[2025-07-05 09:08:20] [Rank 0] step:121/10000 train_time:7669ms step_avg:63.38ms +[2025-07-05 09:08:20] [Rank 0] step:121/10000 train_time:7669ms step_avg:63.38ms +[2025-07-05 09:08:21] [Rank 0] step:141/10000 train_time:8997ms step_avg:63.81ms +[2025-07-05 09:08:21] [Rank 0] step:141/10000 train_time:8997ms step_avg:63.81ms +[2025-07-05 09:08:22] [Rank 0] step:161/10000 train_time:10327ms step_avg:64.14ms +[2025-07-05 09:08:22] [Rank 0] step:161/10000 train_time:10327ms step_avg:64.14ms +[2025-07-05 09:08:24] [Rank 0] step:181/10000 train_time:12332ms step_avg:68.14ms +[2025-07-05 09:08:24] [Rank 0] step:181/10000 train_time:12332ms step_avg:68.14ms +[2025-07-05 09:08:25] [Rank 0] step:201/10000 train_time:13050ms step_avg:64.92ms +[2025-07-05 09:08:25] [Rank 0] step:201/10000 train_time:13050ms step_avg:64.92ms +[2025-07-05 09:08:26] [Rank 0] step:221/10000 train_time:14380ms step_avg:65.07ms +[2025-07-05 09:08:26] [Rank 0] step:221/10000 train_time:14380ms step_avg:65.07ms +[2025-07-05 09:08:28] [Rank 0] step:241/10000 train_time:15711ms step_avg:65.19ms +[2025-07-05 09:08:28] [Rank 0] step:241/10000 train_time:15711ms step_avg:65.19ms +[2025-07-05 09:08:29] [Rank 0] step:261/10000 train_time:17043ms step_avg:65.30ms +[2025-07-05 09:08:29] [Rank 0] step:261/10000 train_time:17043ms step_avg:65.30ms +[2025-07-05 09:08:30] [Rank 0] step:281/10000 train_time:18376ms step_avg:65.39ms +[2025-07-05 09:08:30] [Rank 0] step:281/10000 train_time:18376ms step_avg:65.39ms +[2025-07-05 09:08:32] [Rank 0] step:301/10000 train_time:19707ms step_avg:65.47ms +[2025-07-05 09:08:32] [Rank 0] step:301/10000 train_time:19707ms step_avg:65.47ms +[2025-07-05 09:08:33] [Rank 0] step:321/10000 train_time:21038ms step_avg:65.54ms +[2025-07-05 09:08:33] [Rank 0] step:321/10000 train_time:21038ms step_avg:65.54ms +[2025-07-05 09:08:34] [Rank 0] step:341/10000 train_time:22369ms step_avg:65.60ms +[2025-07-05 09:08:34] [Rank 0] step:341/10000 train_time:22369ms step_avg:65.60ms +[2025-07-05 09:08:36] [Rank 0] step:361/10000 train_time:23956ms step_avg:66.36ms +[2025-07-05 09:08:36] [Rank 0] step:361/10000 train_time:23956ms step_avg:66.36ms +[2025-07-05 09:08:37] [Rank 0] step:381/10000 train_time:25082ms step_avg:65.83ms +[2025-07-05 09:08:37] [Rank 0] step:381/10000 train_time:25082ms step_avg:65.83ms +[2025-07-05 09:08:38] [Rank 0] step:401/10000 train_time:26413ms step_avg:65.87ms +[2025-07-05 09:08:38] [Rank 0] step:401/10000 train_time:26413ms step_avg:65.87ms +[2025-07-05 09:08:40] [Rank 0] step:421/10000 train_time:27744ms step_avg:65.90ms 
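The step_avg field in the interval lines above is the cumulative wall-clock training time divided by the printed step index (the script prints step + 1 and divides by max(1, step + 1)). A worked example against the step 161 line, with the values read off the log:

train_time_ms = 10327                        # cumulative train_time at "step:161/10000"
printed_step = 161
step_avg_ms = train_time_ms / printed_step   # ~64.14 ms, matching the logged step_avg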
+[2025-07-05 09:08:40] [Rank 0] step:421/10000 train_time:27744ms step_avg:65.90ms +[2025-07-05 09:08:41] [Rank 0] step:441/10000 train_time:29076ms step_avg:65.93ms +[2025-07-05 09:08:41] [Rank 0] step:441/10000 train_time:29076ms step_avg:65.93ms +[2025-07-05 09:08:42] [Rank 0] step:461/10000 train_time:30411ms step_avg:65.97ms +[2025-07-05 09:08:42] [Rank 0] step:461/10000 train_time:30411ms step_avg:65.97ms +[2025-07-05 09:08:44] [Rank 0] step:481/10000 train_time:31743ms step_avg:65.99ms +[2025-07-05 09:08:44] [Rank 0] step:481/10000 train_time:31743ms step_avg:65.99ms +[2025-07-05 09:08:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:08:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:08:46] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33682ms step_avg:67.36ms +[2025-07-05 09:08:46] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33682ms step_avg:67.36ms +[2025-07-05 09:08:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:08:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..32000ce15be941c0de821eafac588a483ca33590 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "c9f98955-f0e6-4444-8834-2767d2b5ff8d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49/training_log_c9f98955-f0e6-4444-8834-2767d2b5ff8d.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49/training_log_c9f98955-f0e6-4444-8834-2767d2b5ff8d.txt new file mode 100644 index 0000000000000000000000000000000000000000..756ad92b870e90083aea9e4f73e6e22a96ac8ab3 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49/training_log_c9f98955-f0e6-4444-8834-2767d2b5ff8d.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:34:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:34:20 2025 --- +[2025-07-05 09:34:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:34:20 2025 --- +[2025-07-05 09:34:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:34:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 09:34:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:34:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:34:20] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 
09:34:20] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:34:20] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49 +[2025-07-05 09:34:20] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_49 +[2025-07-05 09:34:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-05 09:34:20] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-05 09:34:20] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:34:22] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:34:22] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:34:22] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:34:23] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 09:34:23] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:34:23] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:34:23] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:34:23] [Rank 0] PRINT: Model returns: 
+[2025-07-05 09:34:23] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:34:23] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 09:34:23] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001).
+[2025-07-05 09:34:23] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 09:34:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 09:34:23] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:34:23] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:34:23] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:35:27] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:35:27] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:35:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:35:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:35:36] [Rank 0] step:21/10000 train_time:1025ms step_avg:48.81ms
+[2025-07-05 09:35:37] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.26ms
+[2025-07-05 09:35:39] [Rank 0] step:61/10000 train_time:3671ms step_avg:60.18ms
+[2025-07-05 09:35:40] [Rank 0] step:81/10000 train_time:4994ms step_avg:61.66ms
+[2025-07-05 09:35:41] [Rank 0] step:101/10000 train_time:6319ms step_avg:62.56ms
+[2025-07-05 09:35:43] [Rank 0] step:121/10000 train_time:7644ms step_avg:63.17ms
+[2025-07-05 09:35:44] [Rank 0] step:141/10000 train_time:8967ms step_avg:63.60ms
+[2025-07-05 09:35:45] [Rank 0] step:161/10000 train_time:10291ms step_avg:63.92ms
+[2025-07-05 09:35:47] [Rank 0] step:181/10000 train_time:12282ms step_avg:67.86ms
+[2025-07-05 09:35:48] [Rank 0] step:201/10000 train_time:12995ms step_avg:64.65ms
+[2025-07-05 09:35:49] [Rank 0] step:221/10000 train_time:14321ms step_avg:64.80ms
+[2025-07-05 09:35:51] [Rank 0] step:241/10000 train_time:15647ms step_avg:64.93ms
+[2025-07-05 09:35:52] [Rank 0] step:261/10000 train_time:16973ms step_avg:65.03ms
+[2025-07-05 09:35:53] [Rank 0] step:281/10000 train_time:18301ms step_avg:65.13ms
+[2025-07-05 09:35:55] [Rank 0] step:301/10000 train_time:19629ms step_avg:65.21ms
+[2025-07-05 09:35:56] [Rank 0] step:321/10000 train_time:20955ms step_avg:65.28ms
+[2025-07-05 09:35:57] [Rank 0] step:341/10000 train_time:22283ms step_avg:65.34ms
+[2025-07-05 09:35:59] [Rank 0] step:361/10000 train_time:23866ms step_avg:66.11ms
+[2025-07-05 09:36:00] [Rank 0] step:381/10000 train_time:24995ms step_avg:65.60ms
+[2025-07-05 09:36:01] [Rank 0] step:401/10000 train_time:26362ms step_avg:65.74ms
+[2025-07-05 09:36:03] [Rank 0] step:421/10000 train_time:27690ms step_avg:65.77ms
+[2025-07-05 09:36:04] [Rank 0] step:441/10000 train_time:29018ms step_avg:65.80ms
+[2025-07-05 09:36:05] [Rank 0] step:461/10000 train_time:30346ms step_avg:65.83ms
+[2025-07-05 09:36:07] [Rank 0] step:481/10000 train_time:31674ms step_avg:65.85ms
+[2025-07-05 09:36:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:36:09] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33606ms step_avg:67.21ms
+[2025-07-05 09:36:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..36ed2c29732b5765b91bfc6829952cad7862a10b
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 50,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "9ca03ae9-3341-4bb0-b633-40120f451041",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50/training_log_9ca03ae9-3341-4bb0-b633-40120f451041.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50/training_log_9ca03ae9-3341-4bb0-b633-40120f451041.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bacdffc9fe96ed9501dcc7bec8a9201640ca3759
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50/training_log_9ca03ae9-3341-4bb0-b633-40120f451041.txt
@@ -0,0 +1,2662 @@
+[2025-07-05 10:01:42] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:01:42 2025 ---
+[2025-07-05 10:01:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001)
+[2025-07-05 10:01:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-05 10:01:42] [Rank 0] PRINT: Using fixed seed: 50
+[2025-07-05 
10:01:42] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 10:01:42] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50 +[2025-07-05 10:01:42] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_50 +[2025-07-05 10:01:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the log message to the run's logfile (guarded so it is written once per call)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+ break
+
+ loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+ loss_train.backward()
+ train_loss_sum += loss_train.detach()/ args.train_seq_len
+ train_step_count += 1
+
+ for param in model_compiled.parameters():
+ if param.grad is not None:
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+ current_lr_val = get_lr(step)
+ for opt in optimizers:
+ for group in opt.param_groups:
+ group["lr"] = group["initial_lr"] * current_lr_val
+
+ if optimizer2 is not None:
+ for group in optimizer2.param_groups:
+ frac = min(step / 300, 1)
+ group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+ for opt in optimizers:
+ opt.step()
+
+ model_compiled.zero_grad(set_to_none=True)
+
+ if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+ current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+ approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+ total_tokens_in_batch = args.train_seq_len * world_size
+ train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+ print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+ f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+ dist.destroy_process_group()
+
+[2025-07-05 10:01:42] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-05 10:01:42] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:01:42] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:01:42] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:01:44] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:01:44] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:01:44] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:01:44] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:01:44] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:01:44] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:01:45] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:01:45] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:01:45] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:01:45] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:01:45] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:01:45] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:01:45] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:01:45] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:01:45] [Rank 0] PRINT: Model returns: +[2025-07-05 10:01:45] [Rank 0] PRINT: Model returns: +[2025-07-05 10:01:45] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:01:45] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:01:45] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:01:45] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:01:45] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 10:01:45] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 10:01:45] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:01:45] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:01:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:01:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:01:45] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:01:45] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:01:45] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:01:45] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:01:45] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:01:45] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:02:50] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:02:50] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:02:50] [Rank 0] PRINT: Starting training... +[2025-07-05 10:02:50] [Rank 0] PRINT: Starting training... +[2025-07-05 10:02:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:02:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:02:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:02:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:02:59] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.56ms +[2025-07-05 10:02:59] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.56ms +[2025-07-05 10:03:00] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.23ms +[2025-07-05 10:03:00] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.23ms +[2025-07-05 10:03:01] [Rank 0] step:61/10000 train_time:3673ms step_avg:60.22ms +[2025-07-05 10:03:01] [Rank 0] step:61/10000 train_time:3673ms step_avg:60.22ms +[2025-07-05 10:03:03] [Rank 0] step:81/10000 train_time:5003ms step_avg:61.76ms +[2025-07-05 10:03:03] [Rank 0] step:81/10000 train_time:5003ms step_avg:61.76ms +[2025-07-05 10:03:04] [Rank 0] step:101/10000 train_time:6330ms step_avg:62.67ms +[2025-07-05 10:03:04] [Rank 0] step:101/10000 train_time:6330ms step_avg:62.67ms +[2025-07-05 10:03:05] [Rank 0] step:121/10000 train_time:7658ms step_avg:63.29ms +[2025-07-05 10:03:05] [Rank 0] step:121/10000 train_time:7658ms step_avg:63.29ms +[2025-07-05 10:03:07] [Rank 0] step:141/10000 train_time:8987ms step_avg:63.74ms +[2025-07-05 10:03:07] [Rank 0] step:141/10000 train_time:8987ms step_avg:63.74ms +[2025-07-05 10:03:08] [Rank 0] step:161/10000 train_time:10316ms step_avg:64.08ms +[2025-07-05 10:03:08] [Rank 0] step:161/10000 train_time:10316ms step_avg:64.08ms +[2025-07-05 10:03:09] [Rank 0] step:181/10000 train_time:12305ms step_avg:67.98ms +[2025-07-05 10:03:09] [Rank 0] step:181/10000 train_time:12305ms step_avg:67.98ms +[2025-07-05 10:03:11] [Rank 0] step:201/10000 train_time:13021ms step_avg:64.78ms +[2025-07-05 10:03:11] [Rank 0] step:201/10000 train_time:13021ms step_avg:64.78ms +[2025-07-05 10:03:12] [Rank 0] step:221/10000 train_time:14350ms step_avg:64.93ms +[2025-07-05 10:03:12] [Rank 0] step:221/10000 train_time:14350ms step_avg:64.93ms +[2025-07-05 10:03:13] [Rank 0] step:241/10000 train_time:15679ms step_avg:65.06ms +[2025-07-05 10:03:13] [Rank 0] step:241/10000 train_time:15679ms step_avg:65.06ms +[2025-07-05 10:03:15] [Rank 0] step:261/10000 train_time:17009ms step_avg:65.17ms +[2025-07-05 10:03:15] [Rank 0] step:261/10000 train_time:17009ms step_avg:65.17ms +[2025-07-05 10:03:16] [Rank 0] step:281/10000 train_time:18338ms step_avg:65.26ms +[2025-07-05 10:03:16] [Rank 0] step:281/10000 train_time:18338ms step_avg:65.26ms +[2025-07-05 10:03:17] [Rank 0] step:301/10000 train_time:19670ms step_avg:65.35ms +[2025-07-05 10:03:17] [Rank 0] step:301/10000 train_time:19670ms step_avg:65.35ms +[2025-07-05 10:03:19] [Rank 0] step:321/10000 train_time:21001ms step_avg:65.42ms +[2025-07-05 10:03:19] [Rank 0] step:321/10000 train_time:21001ms step_avg:65.42ms +[2025-07-05 10:03:20] [Rank 0] step:341/10000 train_time:22331ms step_avg:65.49ms +[2025-07-05 10:03:20] [Rank 0] step:341/10000 train_time:22331ms step_avg:65.49ms +[2025-07-05 10:03:21] [Rank 0] step:361/10000 train_time:23662ms step_avg:65.55ms +[2025-07-05 10:03:21] [Rank 0] step:361/10000 train_time:23662ms step_avg:65.55ms +[2025-07-05 10:03:23] [Rank 0] step:381/10000 train_time:25055ms step_avg:65.76ms +[2025-07-05 10:03:23] [Rank 0] step:381/10000 train_time:25055ms step_avg:65.76ms +[2025-07-05 10:03:24] [Rank 0] step:401/10000 train_time:26386ms step_avg:65.80ms +[2025-07-05 10:03:24] [Rank 0] step:401/10000 train_time:26386ms step_avg:65.80ms +[2025-07-05 10:03:25] [Rank 0] step:421/10000 train_time:27716ms step_avg:65.83ms 
+[2025-07-05 10:03:25] [Rank 0] step:421/10000 train_time:27716ms step_avg:65.83ms +[2025-07-05 10:03:27] [Rank 0] step:441/10000 train_time:29096ms step_avg:65.98ms +[2025-07-05 10:03:27] [Rank 0] step:441/10000 train_time:29096ms step_avg:65.98ms +[2025-07-05 10:03:28] [Rank 0] step:461/10000 train_time:30425ms step_avg:66.00ms +[2025-07-05 10:03:28] [Rank 0] step:461/10000 train_time:30425ms step_avg:66.00ms +[2025-07-05 10:03:29] [Rank 0] step:481/10000 train_time:31755ms step_avg:66.02ms +[2025-07-05 10:03:29] [Rank 0] step:481/10000 train_time:31755ms step_avg:66.02ms +[2025-07-05 10:03:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:03:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:03:32] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33689ms step_avg:67.38ms +[2025-07-05 10:03:32] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33689ms step_avg:67.38ms +[2025-07-05 10:03:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:03:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3e20e763d05adec8377d4b43c42b654ae8251205 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "395a7752-49a3-49fd-89fb-3577021a7ffa", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51/training_log_395a7752-49a3-49fd-89fb-3577021a7ffa.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51/training_log_395a7752-49a3-49fd-89fb-3577021a7ffa.txt new file mode 100644 index 0000000000000000000000000000000000000000..1bb5936ed6cd6881a6c79d477ba65cbc97d13732 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51/training_log_395a7752-49a3-49fd-89fb-3577021a7ffa.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:29:17] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:29:17 2025 --- +[2025-07-05 10:29:17] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:29:17 2025 --- +[2025-07-05 10:29:17] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:29:17] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-05 10:29:17] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:29:17] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:29:17] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 
10:29:17] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:29:17] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51 +[2025-07-05 10:29:17] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_51 +[2025-07-05 10:29:17] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
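+# Worked example (illustrative only; assumes the num_iterations=10000 and
+# cooldown_frac=0.8 of this run, and is not executed by the script): the
+# get_lr schedule above returns a multiplier on each group's initial_lr of
+#   step     0 -> 1.00   (stable phase, since x < 1 - cooldown_frac = 0.2)
+#   step  2000 -> 1.00   (cooldown begins; the multiplier then decays linearly)
+#   step  6000 -> 0.55
+#   step 10000 -> 0.10   (final value, 10% of the base LR)
+# Similarly, get_window_size_blocks grows the FlexAttention window from
+# 1 block (128 tokens) at step 0 to 14 blocks (1792 tokens) at the last step,
+# since 1728 * x is rounded up to the next multiple of 128.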
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:29:17] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
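+# Illustrative arithmetic for this run (derived from the settings above and the
+# log messages, not computed by the script): val_seq_len = 65536 together with the
+# val_batch_size of 262144 reported in the validation warning implies world_size = 4.
+# Validation therefore runs val_num_steps = 1966080 // 262144 = 7 steps and covers
+# 7 * 262144 = 1,835,008 tokens, leaving 131,072 tokens (about 6.7% of val_tokens)
+# unused each time; that is what the "not perfectly divisible" warning printed at
+# every evaluation refers to. For the CLI args of this run, the run directory name
+# built above expands to mode_5_param_qkvo_lr_0.0001_seed_51, matching the
+# directory recorded at the start of this log.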
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
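+        # Hypothetical example (not taken from the dataset) of how one cleaned QA item is
+        # split here: for cleaned_text = "What is the birth date of X? 12 May 1980",
+        # prompt becomes "What is the birth date of X?" and answer becomes "12 May 1980";
+        # the expected first token is then the first token of " 12 May 1980" under the
+        # GPT-2 tokenizer (the answer is encoded below with a leading space so it matches
+        # the training-time context).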
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
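+# A quick worked check of the get_lr schedule defined above: with num_iterations = 10000
+# and cooldown_frac = 0.8 the multiplier stays at 1.0 for the first 2000 steps and then
+# decays linearly toward 0.1, e.g.
+#   get_lr(0)     = 1.0
+#   get_lr(2000)  = 1.0   (x = 0.2, start of the cooldown, w = 1)
+#   get_lr(6000)  = 0.55  (x = 0.6, w = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1)
+#   get_lr(10000) = 0.1   (x clamped to 1, w = 0)
+# In the training loop below, each group's lr is set to group["initial_lr"] * get_lr(step).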
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:29:17] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:29:17] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:29:17] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:29:17] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:29:19] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:29:19] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:29:19] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:29:19] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:29:19] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:29:19] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:29:20] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:29:20] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:29:20] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:29:20] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:29:20] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:29:20] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:29:20] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:29:20] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:29:20] [Rank 0] PRINT: Model returns: +[2025-07-05 10:29:20] [Rank 0] PRINT: Model returns: +[2025-07-05 10:29:20] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:29:20] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:29:20] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:29:20] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:29:20] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 10:29:20] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001). +[2025-07-05 10:29:20] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:29:20] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:29:20] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:29:20] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:29:20] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:29:20] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:29:20] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:29:20] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:29:20] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:29:20] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:30:23] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:30:23] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:30:23] [Rank 0] PRINT: Starting training... +[2025-07-05 10:30:23] [Rank 0] PRINT: Starting training... +[2025-07-05 10:30:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:30:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:30:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:30:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:30:32] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.65ms +[2025-07-05 10:30:32] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.65ms +[2025-07-05 10:30:33] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.23ms +[2025-07-05 10:30:33] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.23ms +[2025-07-05 10:30:35] [Rank 0] step:61/10000 train_time:3672ms step_avg:60.20ms +[2025-07-05 10:30:35] [Rank 0] step:61/10000 train_time:3672ms step_avg:60.20ms +[2025-07-05 10:30:36] [Rank 0] step:81/10000 train_time:4998ms step_avg:61.71ms +[2025-07-05 10:30:36] [Rank 0] step:81/10000 train_time:4998ms step_avg:61.71ms +[2025-07-05 10:30:37] [Rank 0] step:101/10000 train_time:6327ms step_avg:62.64ms +[2025-07-05 10:30:37] [Rank 0] step:101/10000 train_time:6327ms step_avg:62.64ms +[2025-07-05 10:30:39] [Rank 0] step:121/10000 train_time:7655ms step_avg:63.27ms +[2025-07-05 10:30:39] [Rank 0] step:121/10000 train_time:7655ms step_avg:63.27ms +[2025-07-05 10:30:40] [Rank 0] step:141/10000 train_time:8984ms step_avg:63.72ms +[2025-07-05 10:30:40] [Rank 0] step:141/10000 train_time:8984ms step_avg:63.72ms +[2025-07-05 10:30:41] [Rank 0] step:161/10000 train_time:10312ms step_avg:64.05ms +[2025-07-05 10:30:41] [Rank 0] step:161/10000 train_time:10312ms step_avg:64.05ms +[2025-07-05 10:30:43] [Rank 0] step:181/10000 train_time:11690ms step_avg:64.58ms +[2025-07-05 10:30:43] [Rank 0] step:181/10000 train_time:11690ms step_avg:64.58ms +[2025-07-05 10:30:44] [Rank 0] step:201/10000 train_time:13016ms step_avg:64.76ms +[2025-07-05 10:30:44] [Rank 0] step:201/10000 train_time:13016ms step_avg:64.76ms +[2025-07-05 10:30:45] [Rank 0] step:221/10000 train_time:14345ms step_avg:64.91ms +[2025-07-05 10:30:45] [Rank 0] step:221/10000 train_time:14345ms step_avg:64.91ms +[2025-07-05 10:30:47] [Rank 0] step:241/10000 train_time:15672ms step_avg:65.03ms +[2025-07-05 10:30:47] [Rank 0] step:241/10000 train_time:15672ms step_avg:65.03ms +[2025-07-05 10:30:48] [Rank 0] step:261/10000 train_time:17002ms step_avg:65.14ms +[2025-07-05 10:30:48] [Rank 0] step:261/10000 train_time:17002ms step_avg:65.14ms +[2025-07-05 10:30:49] [Rank 0] step:281/10000 train_time:18331ms step_avg:65.23ms +[2025-07-05 10:30:49] [Rank 0] step:281/10000 train_time:18331ms step_avg:65.23ms +[2025-07-05 10:30:51] [Rank 0] step:301/10000 train_time:19702ms step_avg:65.46ms +[2025-07-05 10:30:51] [Rank 0] step:301/10000 train_time:19702ms step_avg:65.46ms +[2025-07-05 10:30:52] [Rank 0] step:321/10000 train_time:21032ms step_avg:65.52ms +[2025-07-05 10:30:52] [Rank 0] step:321/10000 train_time:21032ms step_avg:65.52ms +[2025-07-05 10:30:53] [Rank 0] step:341/10000 train_time:22362ms step_avg:65.58ms +[2025-07-05 10:30:53] [Rank 0] step:341/10000 train_time:22362ms step_avg:65.58ms +[2025-07-05 10:30:55] [Rank 0] step:361/10000 train_time:23947ms step_avg:66.34ms +[2025-07-05 10:30:55] [Rank 0] step:361/10000 train_time:23947ms step_avg:66.34ms +[2025-07-05 10:30:56] [Rank 0] step:381/10000 train_time:25083ms step_avg:65.83ms +[2025-07-05 10:30:56] [Rank 0] step:381/10000 train_time:25083ms step_avg:65.83ms +[2025-07-05 10:30:57] [Rank 0] step:401/10000 train_time:26414ms step_avg:65.87ms +[2025-07-05 10:30:57] [Rank 0] step:401/10000 train_time:26414ms step_avg:65.87ms +[2025-07-05 10:30:59] [Rank 0] step:421/10000 train_time:27745ms step_avg:65.90ms 
+[2025-07-05 10:30:59] [Rank 0] step:421/10000 train_time:27745ms step_avg:65.90ms +[2025-07-05 10:31:00] [Rank 0] step:441/10000 train_time:29075ms step_avg:65.93ms +[2025-07-05 10:31:00] [Rank 0] step:441/10000 train_time:29075ms step_avg:65.93ms +[2025-07-05 10:31:01] [Rank 0] step:461/10000 train_time:30403ms step_avg:65.95ms +[2025-07-05 10:31:01] [Rank 0] step:461/10000 train_time:30403ms step_avg:65.95ms +[2025-07-05 10:31:03] [Rank 0] step:481/10000 train_time:31736ms step_avg:65.98ms +[2025-07-05 10:31:03] [Rank 0] step:481/10000 train_time:31736ms step_avg:65.98ms +[2025-07-05 10:31:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:31:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:31:05] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33670ms step_avg:67.34ms +[2025-07-05 10:31:05] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33670ms step_avg:67.34ms +[2025-07-05 10:31:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:31:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2667e5ca99ed2b26e70ba2c76dcab52e6a4ff307 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "4e31211e-5458-456f-a62f-a0720666b141", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/training_log_4e31211e-5458-456f-a62f-a0720666b141.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/training_log_4e31211e-5458-456f-a62f-a0720666b141.txt new file mode 100644 index 0000000000000000000000000000000000000000..81bb34b4f2fd40586d8b2b0bbc1109316d37004f --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/training_log_4e31211e-5458-456f-a62f-a0720666b141.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:28:41] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:28:41 2025 --- +[2025-07-05 08:28:41] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:28:41 2025 --- +[2025-07-05 08:28:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:28:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:28:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:28:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:28:41] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 
08:28:41] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:28:41] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42 +[2025-07-05 08:28:41] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42 +[2025-07-05 08:28:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
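+# --- Editorial sketch (not part of the logged script): a worked example of the
+# stable-then-decay LR schedule defined by get_lr() above, assuming the logged
+# hyperparameters num_iterations=10000 and cooldown_frac=0.8. The multiplier is
+# held at 1.0 for the first 20% of training, then decays linearly toward 0.1:
+#   step  2000: x = 0.2, w = (1 - 0.2)/0.8 = 1.00 -> multiplier 1.00
+#   step  6000: x = 0.6, w = 0.50               -> multiplier 0.5*1.0 + 0.5*0.1 = 0.55
+#   step 10000: x = 1.0, w = 0.00               -> multiplier 0.10
+# In the training loop below, each group's lr is set to group["initial_lr"] * get_lr(step).
+# The attention window schedule grows analogously: max(128, next_multiple_of_n(1728*x, n=128))
+# gives a 128-token window at step 0 and a 1792-token window at the final step.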
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:28:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
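+# --- Editorial sketch (not part of the logged script): a minimal example of how
+# this script could be launched, assuming a standard torchrun setup. The flags are
+# the ones defined by the argparse block above, and RANK / LOCAL_RANK / WORLD_SIZE
+# (read from the environment above) are set by torchrun itself. The file name
+# train_gpt_bios.py is hypothetical -- the actual script name is not shown in this log.
+#   torchrun --standalone --nproc_per_node=4 train_gpt_bios.py \
+#       --optimizer_mode 5 --model_parameterization qkvo --adam_lr 0.0002 --seed 42
+# world_size=4 is consistent with the val_batch_size of 262144 reported in the log
+# (4 * val_seq_len of 65536), but the exact GPU count is an assumption.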
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
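# A quick sanity check of the stable-then-decay schedule defined in get_lr above
# (illustrative values only, using num_iterations=10000 and cooldown_frac=0.8 from
# the Hyperparameters): the multiplier holds at 1.0 for the first 20% of training,
# then decays linearly towards 0.1:
#
#   get_lr(0)     == 1.0                      # x = 0.0, stable phase
#   get_lr(2000)  == 1.0                      # x = 0.2, start of the cooldown
#   get_lr(6000)  == 0.5 * 1.0 + 0.5 * 0.1    # x = 0.6 -> w = 0.5 -> 0.55
#   get_lr(10000) == 0.1                      # x = 1.0, end of training
#
# Each optimizer group's lr is then set to group["initial_lr"] * get_lr(step) inside
# the training loop.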
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:28:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:28:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:28:41] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:28:41] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:28:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:28:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:28:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:28:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:28:43] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:28:43] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:28:44] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:28:44] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:28:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:28:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:28:44] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:28:44] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:28:44] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:28:44] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:28:44] [Rank 0] PRINT: Model returns: +[2025-07-05 08:28:44] [Rank 0] PRINT: Model returns: +[2025-07-05 08:28:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:28:44] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:28:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:28:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:28:44] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 08:28:44] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 08:28:44] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:28:44] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:28:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:28:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:28:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:28:44] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:28:44] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:28:44] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:28:44] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:28:44] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:29:47] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:29:47] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:29:48] [Rank 0] PRINT: Starting training... +[2025-07-05 08:29:48] [Rank 0] PRINT: Starting training... +[2025-07-05 08:29:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:29:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
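A short note on the warning above (worked arithmetic from the values printed in this log): val_batch_size = world_size x val_seq_len = 4 x 65536 = 262144 tokens, which implies world_size = 4 for this run. Since val_tokens = 1966080 = 7.5 x 262144, integer division gives 7 validation steps per evaluation, covering 7 x 262144 = 1835008 tokens and skipping the remaining 131072 tokens, which is exactly what the warning flags.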
+[2025-07-05 08:29:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:29:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:29:56] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.66ms +[2025-07-05 08:29:56] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.66ms +[2025-07-05 08:29:58] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.28ms +[2025-07-05 08:29:58] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.28ms +[2025-07-05 08:29:59] [Rank 0] step:61/10000 train_time:3676ms step_avg:60.26ms +[2025-07-05 08:29:59] [Rank 0] step:61/10000 train_time:3676ms step_avg:60.26ms +[2025-07-05 08:30:00] [Rank 0] step:81/10000 train_time:5100ms step_avg:62.97ms +[2025-07-05 08:30:00] [Rank 0] step:81/10000 train_time:5100ms step_avg:62.97ms +[2025-07-05 08:30:02] [Rank 0] step:101/10000 train_time:6427ms step_avg:63.63ms +[2025-07-05 08:30:02] [Rank 0] step:101/10000 train_time:6427ms step_avg:63.63ms +[2025-07-05 08:30:03] [Rank 0] step:121/10000 train_time:7753ms step_avg:64.07ms +[2025-07-05 08:30:03] [Rank 0] step:121/10000 train_time:7753ms step_avg:64.07ms +[2025-07-05 08:30:04] [Rank 0] step:141/10000 train_time:9078ms step_avg:64.38ms +[2025-07-05 08:30:04] [Rank 0] step:141/10000 train_time:9078ms step_avg:64.38ms +[2025-07-05 08:30:06] [Rank 0] step:161/10000 train_time:10404ms step_avg:64.62ms +[2025-07-05 08:30:06] [Rank 0] step:161/10000 train_time:10404ms step_avg:64.62ms +[2025-07-05 08:30:07] [Rank 0] step:181/10000 train_time:11982ms step_avg:66.20ms +[2025-07-05 08:30:07] [Rank 0] step:181/10000 train_time:11982ms step_avg:66.20ms +[2025-07-05 08:30:08] [Rank 0] step:201/10000 train_time:13224ms step_avg:65.79ms +[2025-07-05 08:30:08] [Rank 0] step:201/10000 train_time:13224ms step_avg:65.79ms +[2025-07-05 08:30:10] [Rank 0] step:221/10000 train_time:14551ms step_avg:65.84ms +[2025-07-05 08:30:10] [Rank 0] step:221/10000 train_time:14551ms step_avg:65.84ms +[2025-07-05 08:30:11] [Rank 0] step:241/10000 train_time:15878ms step_avg:65.89ms +[2025-07-05 08:30:11] [Rank 0] step:241/10000 train_time:15878ms step_avg:65.89ms +[2025-07-05 08:30:12] [Rank 0] step:261/10000 train_time:17205ms step_avg:65.92ms +[2025-07-05 08:30:12] [Rank 0] step:261/10000 train_time:17205ms step_avg:65.92ms +[2025-07-05 08:30:14] [Rank 0] step:281/10000 train_time:18530ms step_avg:65.94ms +[2025-07-05 08:30:14] [Rank 0] step:281/10000 train_time:18530ms step_avg:65.94ms +[2025-07-05 08:30:15] [Rank 0] step:301/10000 train_time:19858ms step_avg:65.97ms +[2025-07-05 08:30:15] [Rank 0] step:301/10000 train_time:19858ms step_avg:65.97ms +[2025-07-05 08:30:16] [Rank 0] step:321/10000 train_time:21188ms step_avg:66.01ms +[2025-07-05 08:30:16] [Rank 0] step:321/10000 train_time:21188ms step_avg:66.01ms +[2025-07-05 08:30:18] [Rank 0] step:341/10000 train_time:22618ms step_avg:66.33ms +[2025-07-05 08:30:18] [Rank 0] step:341/10000 train_time:22618ms step_avg:66.33ms +[2025-07-05 08:30:19] [Rank 0] step:361/10000 train_time:24603ms step_avg:68.15ms +[2025-07-05 08:30:19] [Rank 0] step:361/10000 train_time:24603ms step_avg:68.15ms +[2025-07-05 08:30:21] [Rank 0] step:381/10000 train_time:25319ms step_avg:66.45ms +[2025-07-05 08:30:21] [Rank 0] step:381/10000 train_time:25319ms step_avg:66.45ms +[2025-07-05 08:30:22] [Rank 0] step:401/10000 train_time:26749ms step_avg:66.70ms +[2025-07-05 08:30:22] [Rank 0] step:401/10000 train_time:26749ms step_avg:66.70ms +[2025-07-05 08:30:23] [Rank 0] step:421/10000 train_time:28081ms step_avg:66.70ms 
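A rough throughput estimate from the step timings above (illustrative arithmetic, assuming world_size = 4 as implied by the validation batch size): each training step consumes world_size x train_seq_len = 4 x 12288 = 49152 tokens, so a step_avg of ~66.7 ms corresponds to roughly 49152 / 0.0667 ≈ 7.4e5 tokens per second; at that rate the full 10000-iteration run amounts to about 4.9e8 training tokens and ~11 minutes of pure optimization time.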
+[2025-07-05 08:30:23] [Rank 0] step:421/10000 train_time:28081ms step_avg:66.70ms +[2025-07-05 08:30:25] [Rank 0] step:441/10000 train_time:29413ms step_avg:66.70ms +[2025-07-05 08:30:25] [Rank 0] step:441/10000 train_time:29413ms step_avg:66.70ms +[2025-07-05 08:30:26] [Rank 0] step:461/10000 train_time:30743ms step_avg:66.69ms +[2025-07-05 08:30:26] [Rank 0] step:461/10000 train_time:30743ms step_avg:66.69ms +[2025-07-05 08:30:27] [Rank 0] step:481/10000 train_time:32075ms step_avg:66.68ms +[2025-07-05 08:30:27] [Rank 0] step:481/10000 train_time:32075ms step_avg:66.68ms +[2025-07-05 08:30:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:30:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:30:30] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1371 train_time:34012ms step_avg:68.02ms +[2025-07-05 08:30:30] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1371 train_time:34012ms step_avg:68.02ms +[2025-07-05 08:30:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:30:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fc18a7448c2ea8d0efd1bacbc86cb43eeb8ed18d --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "9525e193-10eb-419c-ac07-cce4f74c9a1c", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43/training_log_9525e193-10eb-419c-ac07-cce4f74c9a1c.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43/training_log_9525e193-10eb-419c-ac07-cce4f74c9a1c.txt new file mode 100644 index 0000000000000000000000000000000000000000..79fe15b7aa3cfdb125f3999ab317df9f7cb48b5b --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43/training_log_9525e193-10eb-419c-ac07-cce4f74c9a1c.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:55:50] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:55:50 2025 --- +[2025-07-05 08:55:50] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:55:50 2025 --- +[2025-07-05 08:55:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:55:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:55:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:55:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:55:50] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 
08:55:50] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:55:50] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43 +[2025-07-05 08:55:50] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_43 +[2025-07-05 08:55:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:55:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
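+# Illustrative check of the stable-then-decay schedule in get_lr() above (a sketch,
+# assuming the Hyperparameters defaults in this file: num_iterations=10000,
+# cooldown_frac=0.8); this multiplier scales each group's "initial_lr" inside the
+# training loop below:
+#   get_lr(0)     -> 1.0   (x = 0.0, stable phase since x < 1 - cooldown_frac = 0.2)
+#   get_lr(2000)  -> 1.0   (x = 0.2, w = (1 - 0.2) / 0.8 = 1.0)
+#   get_lr(6000)  -> 0.55  (x = 0.6, w = 0.5, so 0.5 * 1.0 + 0.5 * 0.1)
+#   get_lr(10000) -> 0.1   (x = 1.0, w = 0.0)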
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:55:50] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:55:50] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:55:50] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:55:50] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:55:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:55:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:55:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:55:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:55:53] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:55:53] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:55:53] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:55:53] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:55:53] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:55:53] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:55:53] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:55:53] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:55:53] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:55:53] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:55:53] [Rank 0] PRINT: Model returns: +[2025-07-05 08:55:53] [Rank 0] PRINT: Model returns: +[2025-07-05 08:55:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:55:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:55:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:55:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:55:53] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 08:55:53] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 08:55:53] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:55:53] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:55:53] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:55:53] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:55:53] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:55:53] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:55:53] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:55:53] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:55:53] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:55:53] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:56:58] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:56:58] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:56:58] [Rank 0] PRINT: Starting training... +[2025-07-05 08:56:58] [Rank 0] PRINT: Starting training... +[2025-07-05 08:56:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:56:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:57:05] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:57:05] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:57:07] [Rank 0] step:21/10000 train_time:819ms step_avg:39.00ms +[2025-07-05 08:57:07] [Rank 0] step:21/10000 train_time:819ms step_avg:39.00ms +[2025-07-05 08:57:08] [Rank 0] step:41/10000 train_time:2146ms step_avg:52.33ms +[2025-07-05 08:57:08] [Rank 0] step:41/10000 train_time:2146ms step_avg:52.33ms +[2025-07-05 08:57:09] [Rank 0] step:61/10000 train_time:3473ms step_avg:56.93ms +[2025-07-05 08:57:09] [Rank 0] step:61/10000 train_time:3473ms step_avg:56.93ms +[2025-07-05 08:57:11] [Rank 0] step:81/10000 train_time:4801ms step_avg:59.27ms +[2025-07-05 08:57:11] [Rank 0] step:81/10000 train_time:4801ms step_avg:59.27ms +[2025-07-05 08:57:12] [Rank 0] step:101/10000 train_time:6128ms step_avg:60.67ms +[2025-07-05 08:57:12] [Rank 0] step:101/10000 train_time:6128ms step_avg:60.67ms +[2025-07-05 08:57:13] [Rank 0] step:121/10000 train_time:7454ms step_avg:61.60ms +[2025-07-05 08:57:13] [Rank 0] step:121/10000 train_time:7454ms step_avg:61.60ms +[2025-07-05 08:57:15] [Rank 0] step:141/10000 train_time:8781ms step_avg:62.28ms +[2025-07-05 08:57:15] [Rank 0] step:141/10000 train_time:8781ms step_avg:62.28ms +[2025-07-05 08:57:16] [Rank 0] step:161/10000 train_time:10107ms step_avg:62.78ms +[2025-07-05 08:57:16] [Rank 0] step:161/10000 train_time:10107ms step_avg:62.78ms +[2025-07-05 08:57:17] [Rank 0] step:181/10000 train_time:11435ms step_avg:63.18ms +[2025-07-05 08:57:17] [Rank 0] step:181/10000 train_time:11435ms step_avg:63.18ms +[2025-07-05 08:57:19] [Rank 0] step:201/10000 train_time:12827ms step_avg:63.82ms +[2025-07-05 08:57:19] [Rank 0] step:201/10000 train_time:12827ms step_avg:63.82ms +[2025-07-05 08:57:20] [Rank 0] step:221/10000 train_time:14154ms step_avg:64.04ms +[2025-07-05 08:57:20] [Rank 0] step:221/10000 train_time:14154ms step_avg:64.04ms +[2025-07-05 08:57:21] [Rank 0] step:241/10000 train_time:15481ms step_avg:64.24ms +[2025-07-05 08:57:21] [Rank 0] step:241/10000 train_time:15481ms step_avg:64.24ms +[2025-07-05 08:57:23] [Rank 0] step:261/10000 train_time:16808ms step_avg:64.40ms +[2025-07-05 08:57:23] [Rank 0] step:261/10000 train_time:16808ms step_avg:64.40ms +[2025-07-05 08:57:24] [Rank 0] step:281/10000 train_time:18142ms step_avg:64.56ms +[2025-07-05 08:57:24] [Rank 0] step:281/10000 train_time:18142ms step_avg:64.56ms +[2025-07-05 08:57:25] [Rank 0] step:301/10000 train_time:19470ms step_avg:64.68ms +[2025-07-05 08:57:25] [Rank 0] step:301/10000 train_time:19470ms step_avg:64.68ms +[2025-07-05 08:57:27] [Rank 0] step:321/10000 train_time:20799ms step_avg:64.79ms +[2025-07-05 08:57:27] [Rank 0] step:321/10000 train_time:20799ms step_avg:64.79ms +[2025-07-05 08:57:28] [Rank 0] step:341/10000 train_time:22128ms step_avg:64.89ms +[2025-07-05 08:57:28] [Rank 0] step:341/10000 train_time:22128ms step_avg:64.89ms +[2025-07-05 08:57:29] [Rank 0] step:361/10000 train_time:23503ms step_avg:65.10ms +[2025-07-05 08:57:29] [Rank 0] step:361/10000 train_time:23503ms step_avg:65.10ms +[2025-07-05 08:57:31] [Rank 0] step:381/10000 train_time:24828ms step_avg:65.17ms +[2025-07-05 08:57:31] [Rank 0] step:381/10000 train_time:24828ms step_avg:65.17ms +[2025-07-05 08:57:32] [Rank 0] step:401/10000 train_time:26158ms step_avg:65.23ms +[2025-07-05 08:57:32] [Rank 0] step:401/10000 train_time:26158ms step_avg:65.23ms +[2025-07-05 08:57:33] [Rank 0] step:421/10000 train_time:27486ms step_avg:65.29ms 
+[2025-07-05 08:57:33] [Rank 0] step:421/10000 train_time:27486ms step_avg:65.29ms +[2025-07-05 08:57:35] [Rank 0] step:441/10000 train_time:28815ms step_avg:65.34ms +[2025-07-05 08:57:35] [Rank 0] step:441/10000 train_time:28815ms step_avg:65.34ms +[2025-07-05 08:57:36] [Rank 0] step:461/10000 train_time:30146ms step_avg:65.39ms +[2025-07-05 08:57:36] [Rank 0] step:461/10000 train_time:30146ms step_avg:65.39ms +[2025-07-05 08:57:37] [Rank 0] step:481/10000 train_time:31476ms step_avg:65.44ms +[2025-07-05 08:57:37] [Rank 0] step:481/10000 train_time:31476ms step_avg:65.44ms +[2025-07-05 08:57:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:57:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:57:39] [Rank 0] PRINT: step:500/10000 train_loss:8.7429 val_loss:7.1354 train_time:33410ms step_avg:66.82ms +[2025-07-05 08:57:39] [Rank 0] PRINT: step:500/10000 train_loss:8.7429 val_loss:7.1354 train_time:33410ms step_avg:66.82ms +[2025-07-05 08:57:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:57:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..4563cafeb9dc211cb81b4f7ec7ad4ec0233a5883 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "16c69316-9662-42b2-b1ee-e49506b191dd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44/training_log_16c69316-9662-42b2-b1ee-e49506b191dd.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44/training_log_16c69316-9662-42b2-b1ee-e49506b191dd.txt new file mode 100644 index 0000000000000000000000000000000000000000..006c2641a33bddebcff7b48411d7364ca74d0b5d --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44/training_log_16c69316-9662-42b2-b1ee-e49506b191dd.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:23:29] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:23:29 2025 --- +[2025-07-05 09:23:29] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:23:29 2025 --- +[2025-07-05 09:23:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:23:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:23:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:23:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:23:29] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 
09:23:29] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:23:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44 +[2025-07-05 09:23:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_44 +[2025-07-05 09:23:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
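+# Worked example of the get_lr schedule defined above (an illustrative probe only, not part of the
+# original training flow; the loop and the _probe_step name below are added purely for illustration):
+# with num_iterations=10000 and cooldown_frac=0.8 from Hyperparameters, the multiplier holds at 1.0
+# for the first 20% of steps and then decays linearly to 0.1, e.g. get_lr(2000) == 1.0,
+# get_lr(6000) == 0.55, get_lr(10000) == 0.1.
+for _probe_step in (0, 2000, 6000, 10000):
+    print0(f"PRINT: get_lr({_probe_step}) = {get_lr(_probe_step):.3f}")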
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:23:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
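+# Worked example of the run-directory naming used just above (illustrative only; the helper
+# variable below is not part of the original script): with this script's argparse defaults
+# (optimizer_mode=0, model_parameterization="whole", adam_lr=1e-3, seed=42), the folder component
+# of run_dir_path_str resolves to
+#   mode_0_param_whole_lr_0.001_seed_42
+# so every (mode, parameterization, lr, seed) combination gets its own log directory.
+_example_run_folder = f"mode_{0}_param_{'whole'}_lr_{0.001}_seed_{42}"  # -> "mode_0_param_whole_lr_0.001_seed_42"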
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
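+# Illustrative sketch (assuming the Hyperparameters above: num_iterations=10000,
+# cooldown_frac=0.8, val_loss_every=500) of the bookkeeping used by the loop below:
+# train_loss_sum / train_step_count accumulate the per-step training loss between
+# validation points and are reset after each validation block, so each logged train_loss
+# averages the preceding val_loss_every steps (all-reduced across ranks).
+# The LR schedule above scales every group's initial_lr by get_lr(step), e.g.:
+#   get_lr(0)     -> 1.0   (constant phase, first 20% of steps)
+#   get_lr(6000)  -> 0.55  (linear cooldown)
+#   get_lr(10000) -> 0.1   (ends at 10% of each group's initial_lr)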
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:23:30] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:23:30] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:23:30] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:23:30] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:23:32] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:23:32] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:23:32] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:23:32] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:23:32] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:23:32] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:23:33] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:23:33] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:23:33] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:23:33] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:23:33] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:23:33] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:23:33] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:23:33] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:23:33] [Rank 0] PRINT: Model returns: +[2025-07-05 09:23:33] [Rank 0] PRINT: Model returns: +[2025-07-05 09:23:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:23:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:23:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:23:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:23:33] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:23:33] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:23:33] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:23:33] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:23:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:23:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:23:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:23:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:23:33] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:23:33] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:23:33] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:23:33] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:24:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:24:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:24:36] [Rank 0] PRINT: Starting training... +[2025-07-05 09:24:36] [Rank 0] PRINT: Starting training... +[2025-07-05 09:24:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:24:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:24:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:24:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:24:45] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.55ms +[2025-07-05 09:24:45] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.55ms +[2025-07-05 09:24:46] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.28ms +[2025-07-05 09:24:46] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.28ms +[2025-07-05 09:24:47] [Rank 0] step:61/10000 train_time:3678ms step_avg:60.29ms +[2025-07-05 09:24:47] [Rank 0] step:61/10000 train_time:3678ms step_avg:60.29ms +[2025-07-05 09:24:49] [Rank 0] step:81/10000 train_time:5006ms step_avg:61.81ms +[2025-07-05 09:24:49] [Rank 0] step:81/10000 train_time:5006ms step_avg:61.81ms +[2025-07-05 09:24:50] [Rank 0] step:101/10000 train_time:6334ms step_avg:62.71ms +[2025-07-05 09:24:50] [Rank 0] step:101/10000 train_time:6334ms step_avg:62.71ms +[2025-07-05 09:24:51] [Rank 0] step:121/10000 train_time:7662ms step_avg:63.32ms +[2025-07-05 09:24:51] [Rank 0] step:121/10000 train_time:7662ms step_avg:63.32ms +[2025-07-05 09:24:53] [Rank 0] step:141/10000 train_time:8989ms step_avg:63.75ms +[2025-07-05 09:24:53] [Rank 0] step:141/10000 train_time:8989ms step_avg:63.75ms +[2025-07-05 09:24:54] [Rank 0] step:161/10000 train_time:10317ms step_avg:64.08ms +[2025-07-05 09:24:54] [Rank 0] step:161/10000 train_time:10317ms step_avg:64.08ms +[2025-07-05 09:24:55] [Rank 0] step:181/10000 train_time:11692ms step_avg:64.59ms +[2025-07-05 09:24:55] [Rank 0] step:181/10000 train_time:11692ms step_avg:64.59ms +[2025-07-05 09:24:57] [Rank 0] step:201/10000 train_time:13036ms step_avg:64.86ms +[2025-07-05 09:24:57] [Rank 0] step:201/10000 train_time:13036ms step_avg:64.86ms +[2025-07-05 09:24:58] [Rank 0] step:221/10000 train_time:14364ms step_avg:64.99ms +[2025-07-05 09:24:58] [Rank 0] step:221/10000 train_time:14364ms step_avg:64.99ms +[2025-07-05 09:24:59] [Rank 0] step:241/10000 train_time:15691ms step_avg:65.11ms +[2025-07-05 09:24:59] [Rank 0] step:241/10000 train_time:15691ms step_avg:65.11ms +[2025-07-05 09:25:01] [Rank 0] step:261/10000 train_time:17021ms step_avg:65.21ms +[2025-07-05 09:25:01] [Rank 0] step:261/10000 train_time:17021ms step_avg:65.21ms +[2025-07-05 09:25:02] [Rank 0] step:281/10000 train_time:18349ms step_avg:65.30ms +[2025-07-05 09:25:02] [Rank 0] step:281/10000 train_time:18349ms step_avg:65.30ms +[2025-07-05 09:25:03] [Rank 0] step:301/10000 train_time:19678ms step_avg:65.38ms +[2025-07-05 09:25:03] [Rank 0] step:301/10000 train_time:19678ms step_avg:65.38ms +[2025-07-05 09:25:05] [Rank 0] step:321/10000 train_time:21008ms step_avg:65.45ms +[2025-07-05 09:25:05] [Rank 0] step:321/10000 train_time:21008ms step_avg:65.45ms +[2025-07-05 09:25:06] [Rank 0] step:341/10000 train_time:22338ms step_avg:65.51ms +[2025-07-05 09:25:06] [Rank 0] step:341/10000 train_time:22338ms step_avg:65.51ms +[2025-07-05 09:25:07] [Rank 0] step:361/10000 train_time:24351ms step_avg:67.46ms +[2025-07-05 09:25:07] [Rank 0] step:361/10000 train_time:24351ms step_avg:67.46ms +[2025-07-05 09:25:09] [Rank 0] step:381/10000 train_time:25069ms step_avg:65.80ms +[2025-07-05 09:25:09] [Rank 0] step:381/10000 train_time:25069ms step_avg:65.80ms +[2025-07-05 09:25:10] [Rank 0] step:401/10000 train_time:26400ms step_avg:65.84ms +[2025-07-05 09:25:10] [Rank 0] step:401/10000 train_time:26400ms step_avg:65.84ms +[2025-07-05 09:25:11] [Rank 0] step:421/10000 train_time:27731ms step_avg:65.87ms 
+[2025-07-05 09:25:11] [Rank 0] step:421/10000 train_time:27731ms step_avg:65.87ms +[2025-07-05 09:25:13] [Rank 0] step:441/10000 train_time:29063ms step_avg:65.90ms +[2025-07-05 09:25:13] [Rank 0] step:441/10000 train_time:29063ms step_avg:65.90ms +[2025-07-05 09:25:14] [Rank 0] step:461/10000 train_time:30396ms step_avg:65.93ms +[2025-07-05 09:25:14] [Rank 0] step:461/10000 train_time:30396ms step_avg:65.93ms +[2025-07-05 09:25:15] [Rank 0] step:481/10000 train_time:31729ms step_avg:65.96ms +[2025-07-05 09:25:15] [Rank 0] step:481/10000 train_time:31729ms step_avg:65.96ms +[2025-07-05 09:25:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:25:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:25:17] [Rank 0] PRINT: step:500/10000 train_loss:8.7431 val_loss:7.1363 train_time:33669ms step_avg:67.34ms +[2025-07-05 09:25:17] [Rank 0] PRINT: step:500/10000 train_loss:8.7431 val_loss:7.1363 train_time:33669ms step_avg:67.34ms +[2025-07-05 09:25:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:25:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b3d8366297762c3400af22ee5a4008b208550034 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "0b68ba33-79c4-4a8c-9f91-283b24d6e622", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/training_log_0b68ba33-79c4-4a8c-9f91-283b24d6e622.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/training_log_0b68ba33-79c4-4a8c-9f91-283b24d6e622.txt new file mode 100644 index 0000000000000000000000000000000000000000..be285e597151ee9a047095bb62fa255087bfc904 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/training_log_0b68ba33-79c4-4a8c-9f91-283b24d6e622.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:50:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:50:31 2025 --- +[2025-07-05 09:50:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:50:31 2025 --- +[2025-07-05 09:50:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:50:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:50:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:50:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:50:31] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 
09:50:31] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:50:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45 +[2025-07-05 09:50:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45 +[2025-07-05 09:50:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
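+# Illustrative sketch of the learning-rate schedule: get_lr() above holds the multiplier at 1.0
+# for the first (1 - cooldown_frac) of training, then decays it linearly toward 0.1. Assuming this
+# run's settings (num_iterations=10000, cooldown_frac=0.8):
+#   [round(get_lr(s), 2) for s in (0, 2000, 6000, 10000)]  ->  approximately [1.0, 1.0, 0.55, 0.1]
+# Each optimizer group's lr is later scaled as group["initial_lr"] * get_lr(step) in the training loop.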
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:50:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
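+# Illustrative note on the get_lr schedule defined above ("stable then decay"):
+# the multiplier is held at 1.0 for the first (1 - cooldown_frac) fraction of
+# training, then interpolated linearly down to 0.1 by the final step. With the
+# values used in this config (num_iterations=10000, cooldown_frac=0.8):
+#   get_lr(0)     -> 1.0   (stable phase, since x = 0.0 < 0.2)
+#   get_lr(6000)  -> 0.55  (x = 0.6, w = (1 - 0.6)/0.8 = 0.5, so 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) -> 0.1   (end of cooldown, w = 0)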
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:50:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:50:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:50:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:50:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:50:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:50:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:50:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:50:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:50:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:50:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:50:35] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:50:35] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:50:35] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:50:35] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:50:35] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:50:35] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:50:35] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:50:35] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:50:35] [Rank 0] PRINT: Model returns: +[2025-07-05 09:50:35] [Rank 0] PRINT: Model returns: +[2025-07-05 09:50:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:50:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:50:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:50:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:50:35] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:50:35] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:50:35] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:50:35] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:50:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:50:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:50:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:50:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:50:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:50:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:50:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:50:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:51:39] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:51:39] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:51:39] [Rank 0] PRINT: Starting training... +[2025-07-05 09:51:39] [Rank 0] PRINT: Starting training... +[2025-07-05 09:51:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:51:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:51:46] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:51:46] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:51:48] [Rank 0] step:21/10000 train_time:813ms step_avg:38.72ms +[2025-07-05 09:51:48] [Rank 0] step:21/10000 train_time:813ms step_avg:38.72ms +[2025-07-05 09:51:49] [Rank 0] step:41/10000 train_time:2140ms step_avg:52.18ms +[2025-07-05 09:51:49] [Rank 0] step:41/10000 train_time:2140ms step_avg:52.18ms +[2025-07-05 09:51:50] [Rank 0] step:61/10000 train_time:3467ms step_avg:56.83ms +[2025-07-05 09:51:50] [Rank 0] step:61/10000 train_time:3467ms step_avg:56.83ms +[2025-07-05 09:51:51] [Rank 0] step:81/10000 train_time:4794ms step_avg:59.19ms +[2025-07-05 09:51:51] [Rank 0] step:81/10000 train_time:4794ms step_avg:59.19ms +[2025-07-05 09:51:53] [Rank 0] step:101/10000 train_time:6120ms step_avg:60.60ms +[2025-07-05 09:51:53] [Rank 0] step:101/10000 train_time:6120ms step_avg:60.60ms +[2025-07-05 09:51:54] [Rank 0] step:121/10000 train_time:7446ms step_avg:61.54ms +[2025-07-05 09:51:54] [Rank 0] step:121/10000 train_time:7446ms step_avg:61.54ms +[2025-07-05 09:51:55] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 09:51:55] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 09:51:57] [Rank 0] step:161/10000 train_time:10100ms step_avg:62.73ms +[2025-07-05 09:51:57] [Rank 0] step:161/10000 train_time:10100ms step_avg:62.73ms +[2025-07-05 09:51:58] [Rank 0] step:181/10000 train_time:12110ms step_avg:66.91ms +[2025-07-05 09:51:58] [Rank 0] step:181/10000 train_time:12110ms step_avg:66.91ms +[2025-07-05 09:52:00] [Rank 0] step:201/10000 train_time:12828ms step_avg:63.82ms +[2025-07-05 09:52:00] [Rank 0] step:201/10000 train_time:12828ms step_avg:63.82ms +[2025-07-05 09:52:01] [Rank 0] step:221/10000 train_time:14157ms step_avg:64.06ms +[2025-07-05 09:52:01] [Rank 0] step:221/10000 train_time:14157ms step_avg:64.06ms +[2025-07-05 09:52:02] [Rank 0] step:241/10000 train_time:15514ms step_avg:64.38ms +[2025-07-05 09:52:02] [Rank 0] step:241/10000 train_time:15514ms step_avg:64.38ms +[2025-07-05 09:52:04] [Rank 0] step:261/10000 train_time:16843ms step_avg:64.53ms +[2025-07-05 09:52:04] [Rank 0] step:261/10000 train_time:16843ms step_avg:64.53ms +[2025-07-05 09:52:05] [Rank 0] step:281/10000 train_time:18171ms step_avg:64.67ms +[2025-07-05 09:52:05] [Rank 0] step:281/10000 train_time:18171ms step_avg:64.67ms +[2025-07-05 09:52:06] [Rank 0] step:301/10000 train_time:19500ms step_avg:64.78ms +[2025-07-05 09:52:06] [Rank 0] step:301/10000 train_time:19500ms step_avg:64.78ms +[2025-07-05 09:52:08] [Rank 0] step:321/10000 train_time:20830ms step_avg:64.89ms +[2025-07-05 09:52:08] [Rank 0] step:321/10000 train_time:20830ms step_avg:64.89ms +[2025-07-05 09:52:09] [Rank 0] step:341/10000 train_time:22160ms step_avg:64.99ms +[2025-07-05 09:52:09] [Rank 0] step:341/10000 train_time:22160ms step_avg:64.99ms +[2025-07-05 09:52:10] [Rank 0] step:361/10000 train_time:23487ms step_avg:65.06ms +[2025-07-05 09:52:10] [Rank 0] step:361/10000 train_time:23487ms step_avg:65.06ms +[2025-07-05 09:52:12] [Rank 0] step:381/10000 train_time:24858ms step_avg:65.24ms +[2025-07-05 09:52:12] [Rank 0] step:381/10000 train_time:24858ms step_avg:65.24ms +[2025-07-05 09:52:13] [Rank 0] step:401/10000 train_time:26187ms step_avg:65.30ms +[2025-07-05 09:52:13] [Rank 0] step:401/10000 train_time:26187ms step_avg:65.30ms +[2025-07-05 09:52:14] [Rank 0] step:421/10000 train_time:27516ms step_avg:65.36ms 
+[2025-07-05 09:52:14] [Rank 0] step:421/10000 train_time:27516ms step_avg:65.36ms +[2025-07-05 09:52:16] [Rank 0] step:441/10000 train_time:28846ms step_avg:65.41ms +[2025-07-05 09:52:16] [Rank 0] step:441/10000 train_time:28846ms step_avg:65.41ms +[2025-07-05 09:52:17] [Rank 0] step:461/10000 train_time:30176ms step_avg:65.46ms +[2025-07-05 09:52:17] [Rank 0] step:461/10000 train_time:30176ms step_avg:65.46ms +[2025-07-05 09:52:18] [Rank 0] step:481/10000 train_time:31507ms step_avg:65.50ms +[2025-07-05 09:52:18] [Rank 0] step:481/10000 train_time:31507ms step_avg:65.50ms +[2025-07-05 09:52:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:52:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:52:20] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1362 train_time:33442ms step_avg:66.88ms +[2025-07-05 09:52:20] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1362 train_time:33442ms step_avg:66.88ms +[2025-07-05 09:52:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:52:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..5d48e7e23df63387d3920e2d049d33549411725f --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "4f212dc3-375c-440d-9d9b-a8161bb3168d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46/training_log_4f212dc3-375c-440d-9d9b-a8161bb3168d.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46/training_log_4f212dc3-375c-440d-9d9b-a8161bb3168d.txt new file mode 100644 index 0000000000000000000000000000000000000000..75ecb2a27e3db93a4f1e2b4eb6e89cb8a6627b21 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46/training_log_4f212dc3-375c-440d-9d9b-a8161bb3168d.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:17:46] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:17:46 2025 --- +[2025-07-05 10:17:46] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:17:46 2025 --- +[2025-07-05 10:17:46] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:17:46] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:17:46] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:17:46] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:17:46] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 
10:17:46] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:17:46] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46 +[2025-07-05 10:17:46] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_46 +[2025-07-05 10:17:46] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:17:46] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
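+# Each run logs into logs_bios/qa_0704/mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed};
+# the master process creates that folder below and writes config.json plus a uuid-named training log into it.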
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:17:47] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:17:47] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:17:47] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:17:47] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:17:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:17:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:17:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:17:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:17:49] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:17:49] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:17:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:17:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:17:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:17:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:17:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:17:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:17:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:17:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:17:50] [Rank 0] PRINT: Model returns: +[2025-07-05 10:17:50] [Rank 0] PRINT: Model returns: +[2025-07-05 10:17:50] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:17:50] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:17:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:17:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:17:50] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 10:17:50] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 10:17:50] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:17:50] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:17:50] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:17:50] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:17:50] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:17:50] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:17:50] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:17:50] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:17:50] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:17:50] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:18:54] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:18:54] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:18:54] [Rank 0] PRINT: Starting training... +[2025-07-05 10:18:54] [Rank 0] PRINT: Starting training... +[2025-07-05 10:18:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:18:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:19:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:19:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:19:03] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.56ms +[2025-07-05 10:19:03] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.56ms +[2025-07-05 10:19:04] [Rank 0] step:41/10000 train_time:2344ms step_avg:57.17ms +[2025-07-05 10:19:04] [Rank 0] step:41/10000 train_time:2344ms step_avg:57.17ms +[2025-07-05 10:19:06] [Rank 0] step:61/10000 train_time:3670ms step_avg:60.17ms +[2025-07-05 10:19:06] [Rank 0] step:61/10000 train_time:3670ms step_avg:60.17ms +[2025-07-05 10:19:07] [Rank 0] step:81/10000 train_time:4996ms step_avg:61.68ms +[2025-07-05 10:19:07] [Rank 0] step:81/10000 train_time:4996ms step_avg:61.68ms +[2025-07-05 10:19:08] [Rank 0] step:101/10000 train_time:6320ms step_avg:62.58ms +[2025-07-05 10:19:08] [Rank 0] step:101/10000 train_time:6320ms step_avg:62.58ms +[2025-07-05 10:19:10] [Rank 0] step:121/10000 train_time:7645ms step_avg:63.18ms +[2025-07-05 10:19:10] [Rank 0] step:121/10000 train_time:7645ms step_avg:63.18ms +[2025-07-05 10:19:11] [Rank 0] step:141/10000 train_time:8969ms step_avg:63.61ms +[2025-07-05 10:19:11] [Rank 0] step:141/10000 train_time:8969ms step_avg:63.61ms +[2025-07-05 10:19:12] [Rank 0] step:161/10000 train_time:10294ms step_avg:63.94ms +[2025-07-05 10:19:12] [Rank 0] step:161/10000 train_time:10294ms step_avg:63.94ms +[2025-07-05 10:19:14] [Rank 0] step:181/10000 train_time:11619ms step_avg:64.19ms +[2025-07-05 10:19:14] [Rank 0] step:181/10000 train_time:11619ms step_avg:64.19ms +[2025-07-05 10:19:15] [Rank 0] step:201/10000 train_time:12992ms step_avg:64.64ms +[2025-07-05 10:19:15] [Rank 0] step:201/10000 train_time:12992ms step_avg:64.64ms +[2025-07-05 10:19:16] [Rank 0] step:221/10000 train_time:14317ms step_avg:64.78ms +[2025-07-05 10:19:16] [Rank 0] step:221/10000 train_time:14317ms step_avg:64.78ms +[2025-07-05 10:19:18] [Rank 0] step:241/10000 train_time:15642ms step_avg:64.90ms +[2025-07-05 10:19:18] [Rank 0] step:241/10000 train_time:15642ms step_avg:64.90ms +[2025-07-05 10:19:19] [Rank 0] step:261/10000 train_time:16967ms step_avg:65.01ms +[2025-07-05 10:19:19] [Rank 0] step:261/10000 train_time:16967ms step_avg:65.01ms +[2025-07-05 10:19:20] [Rank 0] step:281/10000 train_time:18293ms step_avg:65.10ms +[2025-07-05 10:19:20] [Rank 0] step:281/10000 train_time:18293ms step_avg:65.10ms +[2025-07-05 10:19:22] [Rank 0] step:301/10000 train_time:19620ms step_avg:65.18ms +[2025-07-05 10:19:22] [Rank 0] step:301/10000 train_time:19620ms step_avg:65.18ms +[2025-07-05 10:19:23] [Rank 0] step:321/10000 train_time:20949ms step_avg:65.26ms +[2025-07-05 10:19:23] [Rank 0] step:321/10000 train_time:20949ms step_avg:65.26ms +[2025-07-05 10:19:24] [Rank 0] step:341/10000 train_time:22276ms step_avg:65.32ms +[2025-07-05 10:19:24] [Rank 0] step:341/10000 train_time:22276ms step_avg:65.32ms +[2025-07-05 10:19:26] [Rank 0] step:361/10000 train_time:23861ms step_avg:66.10ms +[2025-07-05 10:19:26] [Rank 0] step:361/10000 train_time:23861ms step_avg:66.10ms +[2025-07-05 10:19:27] [Rank 0] step:381/10000 train_time:24951ms step_avg:65.49ms +[2025-07-05 10:19:27] [Rank 0] step:381/10000 train_time:24951ms step_avg:65.49ms +[2025-07-05 10:19:28] [Rank 0] step:401/10000 train_time:26279ms step_avg:65.53ms +[2025-07-05 10:19:28] [Rank 0] step:401/10000 train_time:26279ms step_avg:65.53ms +[2025-07-05 10:19:30] [Rank 0] step:421/10000 train_time:27607ms step_avg:65.58ms 
+[2025-07-05 10:19:30] [Rank 0] step:421/10000 train_time:27607ms step_avg:65.58ms +[2025-07-05 10:19:31] [Rank 0] step:441/10000 train_time:28936ms step_avg:65.61ms +[2025-07-05 10:19:31] [Rank 0] step:441/10000 train_time:28936ms step_avg:65.61ms +[2025-07-05 10:19:32] [Rank 0] step:461/10000 train_time:30265ms step_avg:65.65ms +[2025-07-05 10:19:32] [Rank 0] step:461/10000 train_time:30265ms step_avg:65.65ms +[2025-07-05 10:19:34] [Rank 0] step:481/10000 train_time:31594ms step_avg:65.68ms +[2025-07-05 10:19:34] [Rank 0] step:481/10000 train_time:31594ms step_avg:65.68ms +[2025-07-05 10:19:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:19:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:19:36] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1357 train_time:33527ms step_avg:67.05ms +[2025-07-05 10:19:36] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1357 train_time:33527ms step_avg:67.05ms +[2025-07-05 10:19:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:19:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..4a156a1461faa5b532c0c27d843ffb54c0b3aec3 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "bb1032a2-858b-4dda-96d5-c35694578ba8", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47/training_log_bb1032a2-858b-4dda-96d5-c35694578ba8.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47/training_log_bb1032a2-858b-4dda-96d5-c35694578ba8.txt new file mode 100644 index 0000000000000000000000000000000000000000..fe5527de7e4857ae1ef707b83826564e4ae8a430 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47/training_log_bb1032a2-858b-4dda-96d5-c35694578ba8.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:37:16] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:37:16 2025 --- +[2025-07-05 08:37:16] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:37:16 2025 --- +[2025-07-05 08:37:16] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:37:16] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 08:37:16] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:37:16] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:37:16] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 
08:37:16] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:37:16] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47 +[2025-07-05 08:37:16] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_47 +[2025-07-05 08:37:16] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the run logfile
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
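+# --- Illustrative sketch, not part of the original training run: a pure-function
+# copy of the stable-then-decay multiplier implemented by get_lr above, assuming
+# this run's Hyperparameters (num_iterations=10000, cooldown_frac=0.8), so the
+# schedule shape can be checked in isolation without touching the optimizers.
+def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamp training progress to [0, 1]
+    if x < 1 - cooldown_frac:
+        return 1.0                                  # stable phase (first 20% of steps here)
+    w = (1 - x) / cooldown_frac                     # linear cooldown weight, 1 -> 0
+    return w * 1.0 + (1 - w) * 0.1                  # decays from 1.0 down to a 0.1 floor
+# e.g. step 1000 -> 1.0, step 4000 -> 0.775, step 6000 -> 0.55, step 10000 -> 0.1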
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:37:16] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
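+# --- Illustrative sketch, assuming only the run_dir_path_str f-string above:
+# _example_run_name is a hypothetical helper (not used by the script) showing how
+# the run folder name is assembled from the parsed CLI arguments.
+def _example_run_name(optimizer_mode: int, parameterization: str, adam_lr: float, seed: int) -> str:
+    return f"mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed}"
+# e.g. _example_run_name(0, "qkvo", 0.0001, 42) == "mode_0_param_qkvo_lr_0.0001_seed_42"
+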
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
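# [Editor's note] The get_lr schedule defined above is "stable then decay": the multiplier
# stays at 1.0 for the first (1 - cooldown_frac) of training and then falls linearly to 0.1.
# A minimal standalone sketch, assuming the logged num_iterations=10000 and cooldown_frac=0.8
# (each optimizer group's lr is its initial_lr times this multiplier); the helper name is
# illustrative only:
def _lr_multiplier_sketch(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)  # training progress, clamped to [0, 1]
    if x < 1 - cooldown_frac:                      # first 20% of steps: hold at 1.0
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)         # linear weight 1 -> 0 over the cooldown
    return w * 1.0 + (1 - w) * 0.1                 # interpolate from 1.0 down to 0.1
# e.g. _lr_multiplier_sketch(1000) == 1.0, _lr_multiplier_sketch(6000) == 0.55, _lr_multiplier_sketch(10000) == 0.1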
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:37:17] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:37:17] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:37:17] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:37:17] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:37:19] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:37:19] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:37:19] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:37:19] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:37:19] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:37:19] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:37:20] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:37:20] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:37:20] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:37:20] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:37:20] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:37:20] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:37:20] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:37:20] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:37:20] [Rank 0] PRINT: Model returns: +[2025-07-05 08:37:20] [Rank 0] PRINT: Model returns: +[2025-07-05 08:37:20] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:37:20] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:37:20] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:37:20] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:37:20] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 08:37:20] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 08:37:20] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:37:20] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:37:20] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:37:20] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:37:20] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:37:20] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:37:20] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:37:20] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:37:20] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:37:20] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:38:23] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:38:23] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:38:23] [Rank 0] PRINT: Starting training... +[2025-07-05 08:38:23] [Rank 0] PRINT: Starting training... +[2025-07-05 08:38:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:38:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
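Editor's note: the divisibility warning above follows directly from the logged hyperparameters. With val_seq_len = 65536 and the logged val_batch_size of 262144 tokens, the implied world size is 4, and val_tokens = 1966080 is exactly 7.5 batches, so only 7 full validation steps run and the remaining half batch (131072 tokens) is never scored. A quick check (the world_size value is inferred from the warning, not printed explicitly here):

val_tokens  = 1_966_080
val_seq_len = 65_536
world_size  = 4                          # inferred from val_batch_size = 262144 in the warning
val_batch   = world_size * val_seq_len   # 262144
print(val_tokens / val_batch, val_tokens % val_batch)   # 7.5 batches, 131072 tokens skipped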
+[2025-07-05 08:38:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:38:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:38:32] [Rank 0] step:21/10000 train_time:816ms step_avg:38.86ms +[2025-07-05 08:38:32] [Rank 0] step:21/10000 train_time:816ms step_avg:38.86ms +[2025-07-05 08:38:33] [Rank 0] step:41/10000 train_time:2138ms step_avg:52.15ms +[2025-07-05 08:38:33] [Rank 0] step:41/10000 train_time:2138ms step_avg:52.15ms +[2025-07-05 08:38:34] [Rank 0] step:61/10000 train_time:3462ms step_avg:56.75ms +[2025-07-05 08:38:34] [Rank 0] step:61/10000 train_time:3462ms step_avg:56.75ms +[2025-07-05 08:38:36] [Rank 0] step:81/10000 train_time:4787ms step_avg:59.09ms +[2025-07-05 08:38:36] [Rank 0] step:81/10000 train_time:4787ms step_avg:59.09ms +[2025-07-05 08:38:37] [Rank 0] step:101/10000 train_time:6111ms step_avg:60.50ms +[2025-07-05 08:38:37] [Rank 0] step:101/10000 train_time:6111ms step_avg:60.50ms +[2025-07-05 08:38:38] [Rank 0] step:121/10000 train_time:7435ms step_avg:61.44ms +[2025-07-05 08:38:38] [Rank 0] step:121/10000 train_time:7435ms step_avg:61.44ms +[2025-07-05 08:38:40] [Rank 0] step:141/10000 train_time:8759ms step_avg:62.12ms +[2025-07-05 08:38:40] [Rank 0] step:141/10000 train_time:8759ms step_avg:62.12ms +[2025-07-05 08:38:41] [Rank 0] step:161/10000 train_time:10084ms step_avg:62.64ms +[2025-07-05 08:38:41] [Rank 0] step:161/10000 train_time:10084ms step_avg:62.64ms +[2025-07-05 08:38:42] [Rank 0] step:181/10000 train_time:11408ms step_avg:63.03ms +[2025-07-05 08:38:42] [Rank 0] step:181/10000 train_time:11408ms step_avg:63.03ms +[2025-07-05 08:38:44] [Rank 0] step:201/10000 train_time:12792ms step_avg:63.64ms +[2025-07-05 08:38:44] [Rank 0] step:201/10000 train_time:12792ms step_avg:63.64ms +[2025-07-05 08:38:45] [Rank 0] step:221/10000 train_time:14118ms step_avg:63.88ms +[2025-07-05 08:38:45] [Rank 0] step:221/10000 train_time:14118ms step_avg:63.88ms +[2025-07-05 08:38:46] [Rank 0] step:241/10000 train_time:15444ms step_avg:64.08ms +[2025-07-05 08:38:46] [Rank 0] step:241/10000 train_time:15444ms step_avg:64.08ms +[2025-07-05 08:38:48] [Rank 0] step:261/10000 train_time:16771ms step_avg:64.26ms +[2025-07-05 08:38:48] [Rank 0] step:261/10000 train_time:16771ms step_avg:64.26ms +[2025-07-05 08:38:49] [Rank 0] step:281/10000 train_time:18097ms step_avg:64.40ms +[2025-07-05 08:38:49] [Rank 0] step:281/10000 train_time:18097ms step_avg:64.40ms +[2025-07-05 08:38:50] [Rank 0] step:301/10000 train_time:19424ms step_avg:64.53ms +[2025-07-05 08:38:50] [Rank 0] step:301/10000 train_time:19424ms step_avg:64.53ms +[2025-07-05 08:38:52] [Rank 0] step:321/10000 train_time:20750ms step_avg:64.64ms +[2025-07-05 08:38:52] [Rank 0] step:321/10000 train_time:20750ms step_avg:64.64ms +[2025-07-05 08:38:53] [Rank 0] step:341/10000 train_time:22077ms step_avg:64.74ms +[2025-07-05 08:38:53] [Rank 0] step:341/10000 train_time:22077ms step_avg:64.74ms +[2025-07-05 08:38:54] [Rank 0] step:361/10000 train_time:23658ms step_avg:65.54ms +[2025-07-05 08:38:54] [Rank 0] step:361/10000 train_time:23658ms step_avg:65.54ms +[2025-07-05 08:38:56] [Rank 0] step:381/10000 train_time:24794ms step_avg:65.08ms +[2025-07-05 08:38:56] [Rank 0] step:381/10000 train_time:24794ms step_avg:65.08ms +[2025-07-05 08:38:57] [Rank 0] step:401/10000 train_time:26123ms step_avg:65.14ms +[2025-07-05 08:38:57] [Rank 0] step:401/10000 train_time:26123ms step_avg:65.14ms +[2025-07-05 08:38:58] [Rank 0] step:421/10000 train_time:27451ms step_avg:65.20ms 
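Editor's note: the step:N timing lines in this log come from the periodic logging branch in the script (every 20 steps); the printed step is the zero-based step plus one, and step_avg is simply the accumulated training time divided by that count, e.g. for the step:21 line above:

step_avg_ms = train_time_ms / (step + 1)   # 816 / 21 ≈ 38.86 ms, matching the logged step_avg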
+[2025-07-05 08:38:58] [Rank 0] step:421/10000 train_time:27451ms step_avg:65.20ms +[2025-07-05 08:39:00] [Rank 0] step:441/10000 train_time:28780ms step_avg:65.26ms +[2025-07-05 08:39:00] [Rank 0] step:441/10000 train_time:28780ms step_avg:65.26ms +[2025-07-05 08:39:01] [Rank 0] step:461/10000 train_time:30108ms step_avg:65.31ms +[2025-07-05 08:39:01] [Rank 0] step:461/10000 train_time:30108ms step_avg:65.31ms +[2025-07-05 08:39:02] [Rank 0] step:481/10000 train_time:31437ms step_avg:65.36ms +[2025-07-05 08:39:02] [Rank 0] step:481/10000 train_time:31437ms step_avg:65.36ms +[2025-07-05 08:39:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:39:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:39:04] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1363 train_time:33370ms step_avg:66.74ms +[2025-07-05 08:39:04] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1363 train_time:33370ms step_avg:66.74ms +[2025-07-05 08:39:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:39:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7e186f6bb803284c95645cec37c4c9ae1f37e192 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "1eca1ad2-dbe3-49a4-9391-6b46b44c59d2", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/training_log_1eca1ad2-dbe3-49a4-9391-6b46b44c59d2.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/training_log_1eca1ad2-dbe3-49a4-9391-6b46b44c59d2.txt new file mode 100644 index 0000000000000000000000000000000000000000..187f1a36cda8afc65d8d05a4546d4f1ed5f16fc8 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/training_log_1eca1ad2-dbe3-49a4-9391-6b46b44c59d2.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:04:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:04:39 2025 --- +[2025-07-05 09:04:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:04:39 2025 --- +[2025-07-05 09:04:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:04:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:04:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:04:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:04:39] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 
09:04:39] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 09:04:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48 +[2025-07-05 09:04:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48 +[2025-07-05 09:04:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
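+# A minimal worked example of the stable-then-decay schedule implemented by get_lr above
+# (illustrative only, assuming the Hyperparameters defaults num_iterations=10000 and cooldown_frac=0.8):
+#   steps 0..1999  -> x < 0.2, multiplier stays at 1.0 (stable phase)
+#   step 6000      -> x = 0.6, w = (1 - 0.6) / 0.8 = 0.5, multiplier = 0.5 * 1.0 + 0.5 * 0.1 = 0.55
+#   step 10000     -> x = 1.0, w = 0.0, multiplier = 0.1 (end of the linear cooldown)
+# Inside the training loop below, each optimizer group's lr is set to group["initial_lr"] * get_lr(step).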
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:04:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
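+# Illustrative note on the run-directory layout built below (assuming the argparse defaults
+# --optimizer_mode 0, --model_parameterization "whole", --adam_lr 0.001, --seed 42):
+# the per-run folder resolves to logs_bios/qa_0704/mode_0_param_whole_lr_0.001_seed_42,
+# and both config.json and training_log_<uuid>.txt are written into it by the master process.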
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:04:40] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:04:40] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:04:40] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:04:40] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:04:42] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:04:42] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:04:42] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:04:42] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:04:42] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:04:42] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:04:43] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:04:43] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:04:43] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:04:43] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:04:43] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:04:43] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:04:43] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:04:43] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:04:43] [Rank 0] PRINT: Model returns: +[2025-07-05 09:04:43] [Rank 0] PRINT: Model returns: +[2025-07-05 09:04:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:04:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:04:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:04:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:04:43] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:04:43] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:04:43] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:04:43] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:04:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:04:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:04:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:04:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:04:43] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:04:43] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:04:43] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:04:43] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:05:48] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:05:48] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:05:48] [Rank 0] PRINT: Starting training... +[2025-07-05 09:05:48] [Rank 0] PRINT: Starting training... +[2025-07-05 09:05:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:05:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:05:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:05:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:05:57] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.62ms +[2025-07-05 09:05:57] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.62ms +[2025-07-05 09:05:59] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.26ms +[2025-07-05 09:05:59] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.26ms +[2025-07-05 09:06:00] [Rank 0] step:61/10000 train_time:3675ms step_avg:60.25ms +[2025-07-05 09:06:00] [Rank 0] step:61/10000 train_time:3675ms step_avg:60.25ms +[2025-07-05 09:06:01] [Rank 0] step:81/10000 train_time:5003ms step_avg:61.77ms +[2025-07-05 09:06:01] [Rank 0] step:81/10000 train_time:5003ms step_avg:61.77ms +[2025-07-05 09:06:03] [Rank 0] step:101/10000 train_time:6333ms step_avg:62.70ms +[2025-07-05 09:06:03] [Rank 0] step:101/10000 train_time:6333ms step_avg:62.70ms +[2025-07-05 09:06:04] [Rank 0] step:121/10000 train_time:7661ms step_avg:63.32ms +[2025-07-05 09:06:04] [Rank 0] step:121/10000 train_time:7661ms step_avg:63.32ms +[2025-07-05 09:06:05] [Rank 0] step:141/10000 train_time:8989ms step_avg:63.75ms +[2025-07-05 09:06:05] [Rank 0] step:141/10000 train_time:8989ms step_avg:63.75ms +[2025-07-05 09:06:07] [Rank 0] step:161/10000 train_time:10319ms step_avg:64.10ms +[2025-07-05 09:06:07] [Rank 0] step:161/10000 train_time:10319ms step_avg:64.10ms +[2025-07-05 09:06:08] [Rank 0] step:181/10000 train_time:11697ms step_avg:64.63ms +[2025-07-05 09:06:08] [Rank 0] step:181/10000 train_time:11697ms step_avg:64.63ms +[2025-07-05 09:06:09] [Rank 0] step:201/10000 train_time:12980ms step_avg:64.58ms +[2025-07-05 09:06:09] [Rank 0] step:201/10000 train_time:12980ms step_avg:64.58ms +[2025-07-05 09:06:11] [Rank 0] step:221/10000 train_time:14310ms step_avg:64.75ms +[2025-07-05 09:06:11] [Rank 0] step:221/10000 train_time:14310ms step_avg:64.75ms +[2025-07-05 09:06:12] [Rank 0] step:241/10000 train_time:15640ms step_avg:64.90ms +[2025-07-05 09:06:12] [Rank 0] step:241/10000 train_time:15640ms step_avg:64.90ms +[2025-07-05 09:06:13] [Rank 0] step:261/10000 train_time:16970ms step_avg:65.02ms +[2025-07-05 09:06:13] [Rank 0] step:261/10000 train_time:16970ms step_avg:65.02ms +[2025-07-05 09:06:15] [Rank 0] step:281/10000 train_time:18301ms step_avg:65.13ms +[2025-07-05 09:06:15] [Rank 0] step:281/10000 train_time:18301ms step_avg:65.13ms +[2025-07-05 09:06:16] [Rank 0] step:301/10000 train_time:19633ms step_avg:65.23ms +[2025-07-05 09:06:16] [Rank 0] step:301/10000 train_time:19633ms step_avg:65.23ms +[2025-07-05 09:06:17] [Rank 0] step:321/10000 train_time:20965ms step_avg:65.31ms +[2025-07-05 09:06:17] [Rank 0] step:321/10000 train_time:20965ms step_avg:65.31ms +[2025-07-05 09:06:19] [Rank 0] step:341/10000 train_time:22297ms step_avg:65.39ms +[2025-07-05 09:06:19] [Rank 0] step:341/10000 train_time:22297ms step_avg:65.39ms +[2025-07-05 09:06:20] [Rank 0] step:361/10000 train_time:23882ms step_avg:66.15ms +[2025-07-05 09:06:20] [Rank 0] step:361/10000 train_time:23882ms step_avg:66.15ms +[2025-07-05 09:06:21] [Rank 0] step:381/10000 train_time:25005ms step_avg:65.63ms +[2025-07-05 09:06:21] [Rank 0] step:381/10000 train_time:25005ms step_avg:65.63ms +[2025-07-05 09:06:23] [Rank 0] step:401/10000 train_time:26338ms step_avg:65.68ms +[2025-07-05 09:06:23] [Rank 0] step:401/10000 train_time:26338ms step_avg:65.68ms +[2025-07-05 09:06:24] [Rank 0] step:421/10000 train_time:27670ms step_avg:65.73ms 
+[2025-07-05 09:06:24] [Rank 0] step:421/10000 train_time:27670ms step_avg:65.73ms +[2025-07-05 09:06:25] [Rank 0] step:441/10000 train_time:29005ms step_avg:65.77ms +[2025-07-05 09:06:25] [Rank 0] step:441/10000 train_time:29005ms step_avg:65.77ms +[2025-07-05 09:06:27] [Rank 0] step:461/10000 train_time:30337ms step_avg:65.81ms +[2025-07-05 09:06:27] [Rank 0] step:461/10000 train_time:30337ms step_avg:65.81ms +[2025-07-05 09:06:28] [Rank 0] step:481/10000 train_time:31669ms step_avg:65.84ms +[2025-07-05 09:06:28] [Rank 0] step:481/10000 train_time:31669ms step_avg:65.84ms +[2025-07-05 09:06:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:06:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:06:30] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1360 train_time:33614ms step_avg:67.23ms +[2025-07-05 09:06:30] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1360 train_time:33614ms step_avg:67.23ms +[2025-07-05 09:06:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:06:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..40500857293c06c902fbdd9d4b4f6bc9d61bfb1a --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "959ca817-4207-46ed-80dd-beb64dac9062", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49/training_log_959ca817-4207-46ed-80dd-beb64dac9062.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49/training_log_959ca817-4207-46ed-80dd-beb64dac9062.txt new file mode 100644 index 0000000000000000000000000000000000000000..846ca6b6d052d281cee72f6fa92c3f94051abe1d --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49/training_log_959ca817-4207-46ed-80dd-beb64dac9062.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:32:08] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:32:08 2025 --- +[2025-07-05 09:32:08] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:32:08 2025 --- +[2025-07-05 09:32:08] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:32:08] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 09:32:08] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:32:08] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:32:08] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 
09:32:08] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:32:08] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49 +[2025-07-05 09:32:08] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_49 +[2025-07-05 09:32:08] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
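+# train_loss_sum / train_step_count accumulate the running training loss for the interval
+# between validation passes; both are reset inside the validation branch below.
+# Schedule note (see get_lr above): with num_iterations=10000 and cooldown_frac=0.8 the LR
+# multiplier stays at 1.0 for the first 2000 steps, then decays linearly to 0.1 by step 10000.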
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:32:08] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
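+# The absolute run_dir_path above is only a default; rank 0 rebuilds it below from
+# base_log_dir plus the run settings (e.g. mode_0_param_qkvo_lr_0.0001_seed_42) and then
+# writes config.json, the evaluation plots, and any checkpoints into that folder.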
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
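+        # Illustrative example (hypothetical): cleaned text "Where did Alice study? MIT"
+        #   -> prompt = "Where did Alice study?", answer = "MIT"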
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
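# [Editor's note, illustrative only - not part of the logged script] A quick worked
# example of the stable-then-decay schedule implemented by get_lr() above, assuming
# this run's settings (num_iterations=10000, cooldown_frac=0.8):
#   step     0 -> x=0.00 -> multiplier 1.000   (stable phase, x < 1 - cooldown_frac)
#   step  2000 -> x=0.20 -> multiplier 1.000   (cooldown begins at x = 0.2)
#   step  6000 -> x=0.60 -> w=(1-0.6)/0.8=0.5 -> 0.5*1.0 + 0.5*0.1 = 0.550
#   step 10000 -> x=1.00 -> w=0.0             -> 0.100  (decay floor)
# i.e. [round(get_lr(s), 3) for s in (0, 2000, 6000, 10000)] == [1.0, 1.0, 0.55, 0.1]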
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:32:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:32:09] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:32:11] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:32:11] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:32:11] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:32:12] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 09:32:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:32:12] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:32:12] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:32:12] [Rank 0] PRINT: Model returns: 
+[2025-07-05 09:32:12] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:32:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 09:32:12] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002).
+[2025-07-05 09:32:12] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 09:32:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 09:32:12] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:32:12] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:32:12] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:33:15] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:33:15] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:33:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
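Editor's aside on the warning above, with values taken from this log (assuming val_batch_size = world_size * val_seq_len, as computed in the script):

    # 1966080 validation tokens / 262144 tokens per validation batch = 7.5,
    # so integer division yields 7 validation steps and the trailing half
    # batch is never evaluated.
    print(1966080 // 262144)   # -> 7 validation steps per eval
    print(1966080 % 262144)    # -> 131072 tokens skipped per validation pass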
+[2025-07-05 09:33:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:33:24] [Rank 0] step:21/10000 train_time:824ms step_avg:39.25ms
+[2025-07-05 09:33:25] [Rank 0] step:41/10000 train_time:2151ms step_avg:52.45ms
+[2025-07-05 09:33:27] [Rank 0] step:61/10000 train_time:3477ms step_avg:57.00ms
+[2025-07-05 09:33:28] [Rank 0] step:81/10000 train_time:4807ms step_avg:59.34ms
+[2025-07-05 09:33:29] [Rank 0] step:101/10000 train_time:6136ms step_avg:60.75ms
+[2025-07-05 09:33:31] [Rank 0] step:121/10000 train_time:7465ms step_avg:61.69ms
+[2025-07-05 09:33:32] [Rank 0] step:141/10000 train_time:8793ms step_avg:62.36ms
+[2025-07-05 09:33:33] [Rank 0] step:161/10000 train_time:10122ms step_avg:62.87ms
+[2025-07-05 09:33:35] [Rank 0] step:181/10000 train_time:12118ms step_avg:66.95ms
+[2025-07-05 09:33:36] [Rank 0] step:201/10000 train_time:12835ms step_avg:63.85ms
+[2025-07-05 09:33:37] [Rank 0] step:221/10000 train_time:14164ms step_avg:64.09ms
+[2025-07-05 09:33:39] [Rank 0] step:241/10000 train_time:15495ms step_avg:64.29ms
+[2025-07-05 09:33:40] [Rank 0] step:261/10000 train_time:16825ms step_avg:64.46ms
+[2025-07-05 09:33:41] [Rank 0] step:281/10000 train_time:18155ms step_avg:64.61ms
+[2025-07-05 09:33:43] [Rank 0] step:301/10000 train_time:19486ms step_avg:64.74ms
+[2025-07-05 09:33:44] [Rank 0] step:321/10000 train_time:20818ms step_avg:64.85ms
+[2025-07-05 09:33:45] [Rank 0] step:341/10000 train_time:22150ms step_avg:64.96ms
+[2025-07-05 09:33:47] [Rank 0] step:361/10000 train_time:24150ms step_avg:66.90ms
+[2025-07-05 09:33:48] [Rank 0] step:381/10000 train_time:24867ms step_avg:65.27ms
+[2025-07-05 09:33:49] [Rank 0] step:401/10000 train_time:26199ms step_avg:65.34ms
+[2025-07-05 09:33:51] [Rank 0] step:421/10000 train_time:27532ms step_avg:65.40ms
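Editor's aside: in the step lines above, step_avg is the cumulative train_time divided by the printed step number (see the periodic logging call in the script logged earlier), e.g. for the step:421 line:

    print(round(27532 / 421, 2))   # -> 65.4, matching step_avg:65.40ms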
+[2025-07-05 09:33:52] [Rank 0] step:441/10000 train_time:28866ms step_avg:65.46ms
+[2025-07-05 09:33:53] [Rank 0] step:461/10000 train_time:30199ms step_avg:65.51ms
+[2025-07-05 09:33:55] [Rank 0] step:481/10000 train_time:31560ms step_avg:65.61ms
+[2025-07-05 09:33:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:33:57] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1363 train_time:33502ms step_avg:67.00ms
+[2025-07-05 09:33:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..712bd17ed7e422ca126661dd69c10aee31024ff5
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 50,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "f6e8c3cc-9f85-41ea-a835-8ca04f12cc12",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50/training_log_f6e8c3cc-9f85-41ea-a835-8ca04f12cc12.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50/training_log_f6e8c3cc-9f85-41ea-a835-8ca04f12cc12.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8c4b6e00729380d621bf4facdb8268ec7e9e1ac0
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50/training_log_f6e8c3cc-9f85-41ea-a835-8ca04f12cc12.txt
@@ -0,0 +1,2662 @@
+[2025-07-05 09:59:29] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:59:29 2025 ---
+[2025-07-05 09:59:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002)
+[2025-07-05 09:59:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-05 09:59:29] [Rank 0] PRINT: Using fixed seed: 50
+[2025-07-05 
09:59:29] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:59:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50 +[2025-07-05 09:59:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_50 +[2025-07-05 09:59:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
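+# --- Illustrative sanity check (added for exposition; not part of the original run) ---
+# The stable-then-decay schedule in get_lr above keeps the multiplier at 1.0 for the
+# first (1 - cooldown_frac) of training, then interpolates linearly down to 0.1.
+# With the num_iterations=10000 / cooldown_frac=0.8 defaults above, these spot checks hold:
+assert math.isclose(get_lr(0), 1.0)                            # stable phase
+assert math.isclose(get_lr(2000), 1.0)                         # cooldown begins at x = 0.2
+assert math.isclose(get_lr(6000), 0.55)                        # halfway through cooldown
+assert math.isclose(get_lr(args.num_iterations), 0.1)          # final multiplier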
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:59:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
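+# --- Illustrative sketch (added for exposition; not part of the original script) ---
+# _load_data_shard, defined earlier in this script, expects each .bin shard to start
+# with a 256-element int32 header (magic 20240520, version 1, token count) followed by
+# uint16 token ids. A minimal, hypothetical writer producing a compatible shard:
+def _write_data_shard_example(tokens: np.ndarray, path: str):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520      # magic number checked by _load_data_shard
+    header[1] = 1             # format version
+    header[2] = len(tokens)   # number of uint16 tokens that follow the header
+    with open(path, "wb") as f:
+        f.write(header.tobytes())                    # 256 * 4 bytes, matching f.seek(256 * 4)
+        f.write(tokens.astype(np.uint16).tobytes())  # reader checks nbytes == 2 * num_tokens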
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
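+# --- Illustrative note (editorial sketch, not part of the logged script) ---
+# With the defaults configured above (num_iterations = 10000, cooldown_frac = 0.8),
+# get_lr() is a stable-then-decay multiplier applied to each param group's "initial_lr":
+#   get_lr(0)     == 1.0    (stable phase, x < 1 - cooldown_frac = 0.2)
+#   get_lr(2000)  == 1.0    (cooldown begins at step 2000)
+#   get_lr(6000)  == 0.55   (halfway through cooldown: w = 0.5 -> 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) == 0.1    (final step decays to 10% of the initial LR)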
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:59:29] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:59:29] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:59:29] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:59:29] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:59:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:59:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:59:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:59:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:59:31] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:59:31] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:59:32] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:59:32] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:59:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:59:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:59:32] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:59:32] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:59:32] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:59:32] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:59:32] [Rank 0] PRINT: Model returns: +[2025-07-05 09:59:32] [Rank 0] PRINT: Model returns: +[2025-07-05 09:59:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:59:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:59:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:59:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:59:32] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:59:32] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-05 09:59:32] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:59:32] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:59:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:59:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:59:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:59:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:59:32] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:59:32] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:59:32] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:59:32] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:00:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:00:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:00:36] [Rank 0] PRINT: Starting training... +[2025-07-05 10:00:36] [Rank 0] PRINT: Starting training... +[2025-07-05 10:00:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:00:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
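+# Note: the divisibility warning above is benign for this run: val_seq_len = 65536 tokens per rank across 4 ranks gives val_batch_size = 4 * 65536 = 262144, and 1966080 / 262144 = 7.5, so each validation pass runs val_tokens // val_batch_size = 7 full steps and the trailing 131072 tokens are skipped.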
+[2025-07-05 10:00:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:00:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:00:44] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.60ms +[2025-07-05 10:00:44] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.60ms +[2025-07-05 10:00:46] [Rank 0] step:41/10000 train_time:2347ms step_avg:57.24ms +[2025-07-05 10:00:46] [Rank 0] step:41/10000 train_time:2347ms step_avg:57.24ms +[2025-07-05 10:00:47] [Rank 0] step:61/10000 train_time:3674ms step_avg:60.22ms +[2025-07-05 10:00:47] [Rank 0] step:61/10000 train_time:3674ms step_avg:60.22ms +[2025-07-05 10:00:48] [Rank 0] step:81/10000 train_time:5000ms step_avg:61.72ms +[2025-07-05 10:00:48] [Rank 0] step:81/10000 train_time:5000ms step_avg:61.72ms +[2025-07-05 10:00:50] [Rank 0] step:101/10000 train_time:6326ms step_avg:62.63ms +[2025-07-05 10:00:50] [Rank 0] step:101/10000 train_time:6326ms step_avg:62.63ms +[2025-07-05 10:00:51] [Rank 0] step:121/10000 train_time:7652ms step_avg:63.24ms +[2025-07-05 10:00:51] [Rank 0] step:121/10000 train_time:7652ms step_avg:63.24ms +[2025-07-05 10:00:52] [Rank 0] step:141/10000 train_time:8980ms step_avg:63.69ms +[2025-07-05 10:00:52] [Rank 0] step:141/10000 train_time:8980ms step_avg:63.69ms +[2025-07-05 10:00:54] [Rank 0] step:161/10000 train_time:10308ms step_avg:64.02ms +[2025-07-05 10:00:54] [Rank 0] step:161/10000 train_time:10308ms step_avg:64.02ms +[2025-07-05 10:00:55] [Rank 0] step:181/10000 train_time:11635ms step_avg:64.28ms +[2025-07-05 10:00:55] [Rank 0] step:181/10000 train_time:11635ms step_avg:64.28ms +[2025-07-05 10:00:56] [Rank 0] step:201/10000 train_time:13022ms step_avg:64.79ms +[2025-07-05 10:00:56] [Rank 0] step:201/10000 train_time:13022ms step_avg:64.79ms +[2025-07-05 10:00:58] [Rank 0] step:221/10000 train_time:14349ms step_avg:64.93ms +[2025-07-05 10:00:58] [Rank 0] step:221/10000 train_time:14349ms step_avg:64.93ms +[2025-07-05 10:00:59] [Rank 0] step:241/10000 train_time:15677ms step_avg:65.05ms +[2025-07-05 10:00:59] [Rank 0] step:241/10000 train_time:15677ms step_avg:65.05ms +[2025-07-05 10:01:00] [Rank 0] step:261/10000 train_time:17006ms step_avg:65.16ms +[2025-07-05 10:01:00] [Rank 0] step:261/10000 train_time:17006ms step_avg:65.16ms +[2025-07-05 10:01:02] [Rank 0] step:281/10000 train_time:18334ms step_avg:65.25ms +[2025-07-05 10:01:02] [Rank 0] step:281/10000 train_time:18334ms step_avg:65.25ms +[2025-07-05 10:01:03] [Rank 0] step:301/10000 train_time:19663ms step_avg:65.33ms +[2025-07-05 10:01:03] [Rank 0] step:301/10000 train_time:19663ms step_avg:65.33ms +[2025-07-05 10:01:04] [Rank 0] step:321/10000 train_time:20992ms step_avg:65.40ms +[2025-07-05 10:01:04] [Rank 0] step:321/10000 train_time:20992ms step_avg:65.40ms +[2025-07-05 10:01:06] [Rank 0] step:341/10000 train_time:22321ms step_avg:65.46ms +[2025-07-05 10:01:06] [Rank 0] step:341/10000 train_time:22321ms step_avg:65.46ms +[2025-07-05 10:01:07] [Rank 0] step:361/10000 train_time:23651ms step_avg:65.51ms +[2025-07-05 10:01:07] [Rank 0] step:361/10000 train_time:23651ms step_avg:65.51ms +[2025-07-05 10:01:09] [Rank 0] step:381/10000 train_time:25060ms step_avg:65.77ms +[2025-07-05 10:01:09] [Rank 0] step:381/10000 train_time:25060ms step_avg:65.77ms +[2025-07-05 10:01:10] [Rank 0] step:401/10000 train_time:26390ms step_avg:65.81ms +[2025-07-05 10:01:10] [Rank 0] step:401/10000 train_time:26390ms step_avg:65.81ms +[2025-07-05 10:01:11] [Rank 0] step:421/10000 train_time:27719ms step_avg:65.84ms 
+[2025-07-05 10:01:11] [Rank 0] step:421/10000 train_time:27719ms step_avg:65.84ms +[2025-07-05 10:01:13] [Rank 0] step:441/10000 train_time:29049ms step_avg:65.87ms +[2025-07-05 10:01:13] [Rank 0] step:441/10000 train_time:29049ms step_avg:65.87ms +[2025-07-05 10:01:14] [Rank 0] step:461/10000 train_time:30378ms step_avg:65.90ms +[2025-07-05 10:01:14] [Rank 0] step:461/10000 train_time:30378ms step_avg:65.90ms +[2025-07-05 10:01:15] [Rank 0] step:481/10000 train_time:31709ms step_avg:65.92ms +[2025-07-05 10:01:15] [Rank 0] step:481/10000 train_time:31709ms step_avg:65.92ms +[2025-07-05 10:01:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:01:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:01:17] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1362 train_time:33644ms step_avg:67.29ms +[2025-07-05 10:01:17] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1362 train_time:33644ms step_avg:67.29ms +[2025-07-05 10:01:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:01:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c05af06631ced61d39e309e4afecee97280dc35a --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "9442b760-aac7-4def-a1cd-a2b605809d95", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51/training_log_9442b760-aac7-4def-a1cd-a2b605809d95.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51/training_log_9442b760-aac7-4def-a1cd-a2b605809d95.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d300dbc932964b9008de77e019af670a755b5bb --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51/training_log_9442b760-aac7-4def-a1cd-a2b605809d95.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:27:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:27:03 2025 --- +[2025-07-05 10:27:03] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:27:03 2025 --- +[2025-07-05 10:27:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:27:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-05 10:27:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:27:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:27:03] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 
10:27:03] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:27:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51 +[2025-07-05 10:27:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_51 +[2025-07-05 10:27:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append each message to the log file once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
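+# --- Illustrative sketch (not part of the original logged script): the stable-then-decay
+# schedule implemented by get_lr above, evaluated standalone with this run's settings
+# (num_iterations=10000, cooldown_frac=0.8). The multiplier stays at 1.0 for the first
+# 20% of steps, then decays linearly down to 0.1 by the final step.
+def _lr_multiplier_example(step, num_iterations=10000, cooldown_frac=0.8):
+    x = min(max(step / num_iterations, 0.0), 1.0)
+    if x < 1 - cooldown_frac:
+        return 1.0
+    w = (1 - x) / max(cooldown_frac, 1e-9)
+    return w * 1.0 + (1 - w) * 0.1
+# _lr_multiplier_example(0) == 1.0, _lr_multiplier_example(2000) == 1.0,
+# _lr_multiplier_example(6000) == 0.55, _lr_multiplier_example(10000) == 0.1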
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:27:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
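+# Minimal illustrative sketch (an assumption for exposition, not from the original log):
+# how the run folder name used above and below is derived from the parsed CLI flags,
+# shown with the flag values recorded for this particular run.
+_example_args = dict(optimizer_mode=0, model_parameterization="qkvo", adam_lr=0.0001, seed=42)
+_example_run_folder = (f"mode_{_example_args['optimizer_mode']}"
+                       f"_param_{_example_args['model_parameterization']}"
+                       f"_lr_{_example_args['adam_lr']}"
+                       f"_seed_{_example_args['seed']}")
+# _example_run_folder == "mode_0_param_qkvo_lr_0.0001_seed_42"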
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
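+# Worked example (illustrative, not part of the original logged script): the power-law
+# class split produced by generate_powerlaw_selection_counts defined earlier, shown for
+# a small m=3. Group 0 has 1 class with 2**m samples; each later group g has 2**(g-1)
+# classes with 2**(m-g) samples each. This run uses m = M_FOR_POWERLAW = 11 below.
+_demo_counts, _demo_groups = generate_powerlaw_selection_counts(3)
+# _demo_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+# _demo_groups == [0, 1, 2, 2, 3, 3, 3, 3]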
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:27:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:27:04] [Rank 0] PRINT: Constructing model...
+[2025-07-05 10:27:06] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 10:27:06] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 10:27:06] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 10:27:07] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 10:27:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 10:27:07] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 10:27:07] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 10:27:07] [Rank 0] PRINT: Model returns:
+[2025-07-05 10:27:07] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 10:27:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 10:27:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002).
+[2025-07-05 10:27:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 10:27:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 10:27:07] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 10:27:07] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 10:27:07] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 10:28:10] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 10:28:10] [Rank 0] PRINT: Starting training...
+[2025-07-05 10:28:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 10:28:17] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 10:28:19] [Rank 0] step:21/10000 train_time:921ms step_avg:43.86ms
+[2025-07-05 10:28:20] [Rank 0] step:41/10000 train_time:2246ms step_avg:54.77ms
+[2025-07-05 10:28:21] [Rank 0] step:61/10000 train_time:3571ms step_avg:58.55ms
+[2025-07-05 10:28:23] [Rank 0] step:81/10000 train_time:4897ms step_avg:60.45ms
+[2025-07-05 10:28:24] [Rank 0] step:101/10000 train_time:6222ms step_avg:61.60ms
+[2025-07-05 10:28:25] [Rank 0] step:121/10000 train_time:7547ms step_avg:62.37ms
+[2025-07-05 10:28:27] [Rank 0] step:141/10000 train_time:8873ms step_avg:62.93ms
+[2025-07-05 10:28:28] [Rank 0] step:161/10000 train_time:10199ms step_avg:63.35ms
+[2025-07-05 10:28:29] [Rank 0] step:181/10000 train_time:11524ms step_avg:63.67ms
+[2025-07-05 10:28:31] [Rank 0] step:201/10000 train_time:12899ms step_avg:64.17ms
+[2025-07-05 10:28:32] [Rank 0] step:221/10000 train_time:14226ms step_avg:64.37ms
+[2025-07-05 10:28:33] [Rank 0] step:241/10000 train_time:15553ms step_avg:64.53ms
+[2025-07-05 10:28:35] [Rank 0] step:261/10000 train_time:16879ms step_avg:64.67ms
+[2025-07-05 10:28:36] [Rank 0] step:281/10000 train_time:18206ms step_avg:64.79ms
+[2025-07-05 10:28:37] [Rank 0] step:301/10000 train_time:19532ms step_avg:64.89ms
+[2025-07-05 10:28:39] [Rank 0] step:321/10000 train_time:20860ms step_avg:64.98ms
+[2025-07-05 10:28:40] [Rank 0] step:341/10000 train_time:22189ms step_avg:65.07ms
+[2025-07-05 10:28:41] [Rank 0] step:361/10000 train_time:23517ms step_avg:65.14ms
+[2025-07-05 10:28:43] [Rank 0] step:381/10000 train_time:24845ms step_avg:65.21ms
+[2025-07-05 10:28:44] [Rank 0] step:401/10000 train_time:26197ms step_avg:65.33ms
+[2025-07-05 10:28:45] [Rank 0] step:421/10000 train_time:27505ms step_avg:65.33ms +[2025-07-05 10:28:47] [Rank 0] step:441/10000 train_time:28834ms step_avg:65.38ms +[2025-07-05 10:28:47] [Rank 0] step:441/10000 train_time:28834ms step_avg:65.38ms +[2025-07-05 10:28:48] [Rank 0] step:461/10000 train_time:30164ms step_avg:65.43ms +[2025-07-05 10:28:48] [Rank 0] step:461/10000 train_time:30164ms step_avg:65.43ms +[2025-07-05 10:28:49] [Rank 0] step:481/10000 train_time:31494ms step_avg:65.48ms +[2025-07-05 10:28:49] [Rank 0] step:481/10000 train_time:31494ms step_avg:65.48ms +[2025-07-05 10:28:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:28:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:28:51] [Rank 0] PRINT: step:500/10000 train_loss:8.7430 val_loss:7.1357 train_time:33429ms step_avg:66.86ms +[2025-07-05 10:28:51] [Rank 0] PRINT: step:500/10000 train_loss:8.7430 val_loss:7.1357 train_time:33429ms step_avg:66.86ms +[2025-07-05 10:28:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:28:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ea95660ff1464e39eade950ce21658dcb2c94ca5 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "a27c26c3-5b11-4070-a4f6-1302c1b05896", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/training_log_a27c26c3-5b11-4070-a4f6-1302c1b05896.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/training_log_a27c26c3-5b11-4070-a4f6-1302c1b05896.txt new file mode 100644 index 0000000000000000000000000000000000000000..19ecde085ba498dddc9cda5011dc3b1456b1847a --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/training_log_a27c26c3-5b11-4070-a4f6-1302c1b05896.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:26:29] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:26:29 2025 --- +[2025-07-05 08:26:29] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:26:29 2025 --- +[2025-07-05 08:26:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:26:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:26:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:26:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:26:29] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 
08:26:29] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:26:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42 +[2025-07-05 08:26:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42 +[2025-07-05 08:26:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:26:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
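+# Editor's note: condensed reference for the --optimizer_mode flag that appears in the
+# run directory name above. This is a sketch added for readability, not part of the
+# original logged script; the dictionary name _MODE_TO_MUON_GROUPS is hypothetical, and
+# the group names refer to the parameter lists built later in this script
+# (attn_qk_group, attn_vo_group, mlp_w1_group, mlp_w2_group).
+#
+# _MODE_TO_MUON_GROUPS = {
+#     0: ["attn_qk_group", "attn_vo_group", "mlp_w1_group", "mlp_w2_group"],  # Muon on all hidden matrices (this run)
+#     1: ["attn_qk_group"],                                  # Adam: VO attn + MLP
+#     2: ["attn_vo_group"],                                  # Adam: QK attn + MLP
+#     3: ["attn_qk_group", "attn_vo_group"],                 # Adam: MLP
+#     4: ["mlp_w1_group", "mlp_w2_group"],                   # Adam: all attn
+#     5: [],                                                 # Adam: everything
+#     6: ["mlp_w2_group"],                                   # Adam: attn + W_1 MLP
+#     7: ["attn_vo_group", "mlp_w1_group", "mlp_w2_group"],  # Adam: QK attn
+#     8: ["attn_vo_group", "mlp_w2_group"],                  # Adam: QK attn + W_1 MLP
+# }
+#
+# Embeddings, the lm_head weight, and scalar parameters are always assigned to Adam,
+# whichever mode is selected.
+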
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:26:29] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:26:29] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:26:29] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:26:29] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:26:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:26:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:26:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:26:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:26:31] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:26:31] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:26:32] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:26:32] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:26:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:26:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:26:32] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:26:32] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:26:32] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:26:32] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:26:32] [Rank 0] PRINT: Model returns: +[2025-07-05 08:26:32] [Rank 0] PRINT: Model returns: +[2025-07-05 08:26:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:26:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:26:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:26:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:26:32] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 08:26:32] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 08:26:32] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:26:32] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:26:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:26:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:26:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:26:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:26:32] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:26:32] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:26:32] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:26:32] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:27:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:27:36] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:27:36] [Rank 0] PRINT: Starting training... +[2025-07-05 08:27:36] [Rank 0] PRINT: Starting training... +[2025-07-05 08:27:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:27:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:27:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:27:43] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:27:45] [Rank 0] step:21/10000 train_time:718ms step_avg:34.19ms +[2025-07-05 08:27:45] [Rank 0] step:21/10000 train_time:718ms step_avg:34.19ms +[2025-07-05 08:27:46] [Rank 0] step:41/10000 train_time:2146ms step_avg:52.34ms +[2025-07-05 08:27:46] [Rank 0] step:41/10000 train_time:2146ms step_avg:52.34ms +[2025-07-05 08:27:47] [Rank 0] step:61/10000 train_time:3473ms step_avg:56.93ms +[2025-07-05 08:27:47] [Rank 0] step:61/10000 train_time:3473ms step_avg:56.93ms +[2025-07-05 08:27:49] [Rank 0] step:81/10000 train_time:4798ms step_avg:59.23ms +[2025-07-05 08:27:49] [Rank 0] step:81/10000 train_time:4798ms step_avg:59.23ms +[2025-07-05 08:27:50] [Rank 0] step:101/10000 train_time:6123ms step_avg:60.62ms +[2025-07-05 08:27:50] [Rank 0] step:101/10000 train_time:6123ms step_avg:60.62ms +[2025-07-05 08:27:51] [Rank 0] step:121/10000 train_time:7448ms step_avg:61.55ms +[2025-07-05 08:27:51] [Rank 0] step:121/10000 train_time:7448ms step_avg:61.55ms +[2025-07-05 08:27:53] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 08:27:53] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 08:27:54] [Rank 0] step:161/10000 train_time:10100ms step_avg:62.73ms +[2025-07-05 08:27:54] [Rank 0] step:161/10000 train_time:10100ms step_avg:62.73ms +[2025-07-05 08:27:55] [Rank 0] step:181/10000 train_time:12101ms step_avg:66.85ms +[2025-07-05 08:27:55] [Rank 0] step:181/10000 train_time:12101ms step_avg:66.85ms +[2025-07-05 08:27:57] [Rank 0] step:201/10000 train_time:12815ms step_avg:63.76ms +[2025-07-05 08:27:57] [Rank 0] step:201/10000 train_time:12815ms step_avg:63.76ms +[2025-07-05 08:27:58] [Rank 0] step:221/10000 train_time:14144ms step_avg:64.00ms +[2025-07-05 08:27:58] [Rank 0] step:221/10000 train_time:14144ms step_avg:64.00ms +[2025-07-05 08:27:59] [Rank 0] step:241/10000 train_time:15473ms step_avg:64.20ms +[2025-07-05 08:27:59] [Rank 0] step:241/10000 train_time:15473ms step_avg:64.20ms +[2025-07-05 08:28:01] [Rank 0] step:261/10000 train_time:16804ms step_avg:64.38ms +[2025-07-05 08:28:01] [Rank 0] step:261/10000 train_time:16804ms step_avg:64.38ms +[2025-07-05 08:28:02] [Rank 0] step:281/10000 train_time:18134ms step_avg:64.54ms +[2025-07-05 08:28:02] [Rank 0] step:281/10000 train_time:18134ms step_avg:64.54ms +[2025-07-05 08:28:03] [Rank 0] step:301/10000 train_time:19467ms step_avg:64.67ms +[2025-07-05 08:28:03] [Rank 0] step:301/10000 train_time:19467ms step_avg:64.67ms +[2025-07-05 08:28:05] [Rank 0] step:321/10000 train_time:20799ms step_avg:64.79ms +[2025-07-05 08:28:05] [Rank 0] step:321/10000 train_time:20799ms step_avg:64.79ms +[2025-07-05 08:28:06] [Rank 0] step:341/10000 train_time:22134ms step_avg:64.91ms +[2025-07-05 08:28:06] [Rank 0] step:341/10000 train_time:22134ms step_avg:64.91ms +[2025-07-05 08:28:08] [Rank 0] step:361/10000 train_time:23472ms step_avg:65.02ms +[2025-07-05 08:28:08] [Rank 0] step:361/10000 train_time:23472ms step_avg:65.02ms +[2025-07-05 08:28:09] [Rank 0] step:381/10000 train_time:24851ms step_avg:65.23ms +[2025-07-05 08:28:09] [Rank 0] step:381/10000 train_time:24851ms step_avg:65.23ms +[2025-07-05 08:28:10] [Rank 0] step:401/10000 train_time:26187ms step_avg:65.31ms +[2025-07-05 08:28:10] [Rank 0] step:401/10000 train_time:26187ms step_avg:65.31ms +[2025-07-05 08:28:12] [Rank 0] step:421/10000 train_time:27526ms step_avg:65.38ms 
+[2025-07-05 08:28:12] [Rank 0] step:421/10000 train_time:27526ms step_avg:65.38ms +[2025-07-05 08:28:13] [Rank 0] step:441/10000 train_time:28863ms step_avg:65.45ms +[2025-07-05 08:28:13] [Rank 0] step:441/10000 train_time:28863ms step_avg:65.45ms +[2025-07-05 08:28:14] [Rank 0] step:461/10000 train_time:30205ms step_avg:65.52ms +[2025-07-05 08:28:14] [Rank 0] step:461/10000 train_time:30205ms step_avg:65.52ms +[2025-07-05 08:28:16] [Rank 0] step:481/10000 train_time:31645ms step_avg:65.79ms +[2025-07-05 08:28:16] [Rank 0] step:481/10000 train_time:31645ms step_avg:65.79ms +[2025-07-05 08:28:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:28:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:28:18] [Rank 0] PRINT: step:500/10000 train_loss:7.0241 val_loss:4.8072 train_time:33593ms step_avg:67.19ms +[2025-07-05 08:28:18] [Rank 0] PRINT: step:500/10000 train_loss:7.0241 val_loss:4.8072 train_time:33593ms step_avg:67.19ms +[2025-07-05 08:28:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:28:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9b695f7dbba878bf52517069b7fda13e12d2f3b0 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "250fdf03-46b4-4cc8-acc0-1da34df45643", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43/training_log_250fdf03-46b4-4cc8-acc0-1da34df45643.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43/training_log_250fdf03-46b4-4cc8-acc0-1da34df45643.txt new file mode 100644 index 0000000000000000000000000000000000000000..319de431ea3c8bbd12032752be58e6782671633e --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43/training_log_250fdf03-46b4-4cc8-acc0-1da34df45643.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:53:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:53:39 2025 --- +[2025-07-05 08:53:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:53:39 2025 --- +[2025-07-05 08:53:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:53:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:53:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:53:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:53:39] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 
08:53:39] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:53:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43 +[2025-07-05 08:53:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_43 +[2025-07-05 08:53:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
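+# Illustrative values for the get_window_size_blocks schedule defined above,
+# assuming the logged num_iterations=10000: the sliding attention window grows
+# in 128-token steps from 128 tokens (1 block) at step 0 to 1792 tokens
+# (14 blocks) at step 10000; e.g. step 5000 -> 1728*0.5 = 864 -> 896 tokens
+# (7 blocks).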
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:53:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
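+# Worked example (sketch) of the get_lr schedule defined above, using num_iterations = 10000
+# and cooldown_frac = 0.8 from Hyperparameters: the multiplier stays at 1.0 for the first
+# 2000 steps (x < 0.2), then decays linearly toward 0.1, e.g.
+#   get_lr(2000) == 1.0, get_lr(6000) == 0.55, get_lr(10000) == 0.1;
+# inside the training loop each group's lr is set to group["initial_lr"] * get_lr(step).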
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:53:39] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:53:39] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:53:39] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:53:39] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:53:41] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:53:41] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:53:41] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:53:41] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:53:41] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:53:41] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:53:42] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:53:42] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:53:42] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:53:42] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:53:42] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:53:42] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:53:42] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:53:42] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:53:42] [Rank 0] PRINT: Model returns: +[2025-07-05 08:53:42] [Rank 0] PRINT: Model returns: +[2025-07-05 08:53:42] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:53:42] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:53:42] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:53:42] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:53:42] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 08:53:42] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 08:53:42] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:53:42] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:53:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:53:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:53:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:53:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:53:42] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:53:42] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:53:42] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:53:42] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:54:46] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:54:46] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:54:46] [Rank 0] PRINT: Starting training... +[2025-07-05 08:54:46] [Rank 0] PRINT: Starting training... +[2025-07-05 08:54:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:54:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
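The divisibility warning above can be checked with a little arithmetic. The sketch below recomputes the logged numbers; world_size = 4 is an inference from the printed val_batch_size (262144 / 65536), not a value stated explicitly in the log.

# Illustrative arithmetic for the warning above; not part of the logged script.
val_tokens = 1_966_080          # from config.json
val_seq_len = 65_536            # 4*16*1024, from the Hyperparameters dataclass
world_size = 4                  # assumption: inferred from 262144 / 65536
val_batch_size = world_size * val_seq_len          # 262144, matches the logged warning
val_num_steps = val_tokens // val_batch_size       # 7 full validation steps
tokens_missed = val_tokens - val_num_steps * val_batch_size
print(val_batch_size, val_num_steps, tokens_missed)  # 262144 7 131072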
+[2025-07-05 08:54:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:54:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:54:55] [Rank 0] step:21/10000 train_time:817ms step_avg:38.92ms +[2025-07-05 08:54:55] [Rank 0] step:21/10000 train_time:817ms step_avg:38.92ms +[2025-07-05 08:54:56] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.28ms +[2025-07-05 08:54:56] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.28ms +[2025-07-05 08:54:58] [Rank 0] step:61/10000 train_time:3469ms step_avg:56.87ms +[2025-07-05 08:54:58] [Rank 0] step:61/10000 train_time:3469ms step_avg:56.87ms +[2025-07-05 08:54:59] [Rank 0] step:81/10000 train_time:4796ms step_avg:59.21ms +[2025-07-05 08:54:59] [Rank 0] step:81/10000 train_time:4796ms step_avg:59.21ms +[2025-07-05 08:55:00] [Rank 0] step:101/10000 train_time:6122ms step_avg:60.62ms +[2025-07-05 08:55:00] [Rank 0] step:101/10000 train_time:6122ms step_avg:60.62ms +[2025-07-05 08:55:02] [Rank 0] step:121/10000 train_time:7447ms step_avg:61.54ms +[2025-07-05 08:55:02] [Rank 0] step:121/10000 train_time:7447ms step_avg:61.54ms +[2025-07-05 08:55:03] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 08:55:03] [Rank 0] step:141/10000 train_time:8773ms step_avg:62.22ms +[2025-07-05 08:55:04] [Rank 0] step:161/10000 train_time:10098ms step_avg:62.72ms +[2025-07-05 08:55:04] [Rank 0] step:161/10000 train_time:10098ms step_avg:62.72ms +[2025-07-05 08:55:06] [Rank 0] step:181/10000 train_time:11685ms step_avg:64.56ms +[2025-07-05 08:55:06] [Rank 0] step:181/10000 train_time:11685ms step_avg:64.56ms +[2025-07-05 08:55:07] [Rank 0] step:201/10000 train_time:12819ms step_avg:63.78ms +[2025-07-05 08:55:07] [Rank 0] step:201/10000 train_time:12819ms step_avg:63.78ms +[2025-07-05 08:55:08] [Rank 0] step:221/10000 train_time:14145ms step_avg:64.01ms +[2025-07-05 08:55:08] [Rank 0] step:221/10000 train_time:14145ms step_avg:64.01ms +[2025-07-05 08:55:10] [Rank 0] step:241/10000 train_time:15472ms step_avg:64.20ms +[2025-07-05 08:55:10] [Rank 0] step:241/10000 train_time:15472ms step_avg:64.20ms +[2025-07-05 08:55:11] [Rank 0] step:261/10000 train_time:16801ms step_avg:64.37ms +[2025-07-05 08:55:11] [Rank 0] step:261/10000 train_time:16801ms step_avg:64.37ms +[2025-07-05 08:55:12] [Rank 0] step:281/10000 train_time:18129ms step_avg:64.52ms +[2025-07-05 08:55:12] [Rank 0] step:281/10000 train_time:18129ms step_avg:64.52ms +[2025-07-05 08:55:14] [Rank 0] step:301/10000 train_time:19459ms step_avg:64.65ms +[2025-07-05 08:55:14] [Rank 0] step:301/10000 train_time:19459ms step_avg:64.65ms +[2025-07-05 08:55:15] [Rank 0] step:321/10000 train_time:20790ms step_avg:64.77ms +[2025-07-05 08:55:15] [Rank 0] step:321/10000 train_time:20790ms step_avg:64.77ms +[2025-07-05 08:55:16] [Rank 0] step:341/10000 train_time:22121ms step_avg:64.87ms +[2025-07-05 08:55:16] [Rank 0] step:341/10000 train_time:22121ms step_avg:64.87ms +[2025-07-05 08:55:18] [Rank 0] step:361/10000 train_time:24133ms step_avg:66.85ms +[2025-07-05 08:55:18] [Rank 0] step:361/10000 train_time:24133ms step_avg:66.85ms +[2025-07-05 08:55:19] [Rank 0] step:381/10000 train_time:24854ms step_avg:65.23ms +[2025-07-05 08:55:19] [Rank 0] step:381/10000 train_time:24854ms step_avg:65.23ms +[2025-07-05 08:55:20] [Rank 0] step:401/10000 train_time:26194ms step_avg:65.32ms +[2025-07-05 08:55:20] [Rank 0] step:401/10000 train_time:26194ms step_avg:65.32ms +[2025-07-05 08:55:22] [Rank 0] step:421/10000 train_time:27536ms step_avg:65.41ms 
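The detailed evaluation that begins at step 500 below reports loss and first-token accuracy per class group, where groups come from generate_powerlaw_selection_counts (visible in the script dump further down) with M_FOR_POWERLAW = 11. As a minimal illustration, the sketch below re-derives the group/class structure for m = 11; it is a standalone re-implementation, not code from the logged script.

# Minimal re-derivation of the power-law class grouping, assuming m = 11
# (M_FOR_POWERLAW in the script). Group 0 is one class with 2**m samples;
# group g >= 1 has 2**(g-1) classes with 2**(m-g) samples each.
m = 11
selection_counts, class_groups = {}, []
class_id = 0
for group_id in range(m + 1):
    num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
    samples_per_class = 2 ** (m - group_id)
    for _ in range(num_classes):
        selection_counts[class_id] = samples_per_class
        class_groups.append(group_id)
        class_id += 1

print(len(selection_counts))                     # 2048 classes in total
print(selection_counts[0], selection_counts[1])  # 2048 samples (group 0), 1024 (group 1)
print(sum(selection_counts.values()))            # 13312 = 2048 + 11 * 1024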
+[2025-07-05 08:55:22] [Rank 0] step:421/10000 train_time:27536ms step_avg:65.41ms +[2025-07-05 08:55:23] [Rank 0] step:441/10000 train_time:28876ms step_avg:65.48ms +[2025-07-05 08:55:23] [Rank 0] step:441/10000 train_time:28876ms step_avg:65.48ms +[2025-07-05 08:55:24] [Rank 0] step:461/10000 train_time:30218ms step_avg:65.55ms +[2025-07-05 08:55:24] [Rank 0] step:461/10000 train_time:30218ms step_avg:65.55ms +[2025-07-05 08:55:26] [Rank 0] step:481/10000 train_time:31561ms step_avg:65.62ms +[2025-07-05 08:55:26] [Rank 0] step:481/10000 train_time:31561ms step_avg:65.62ms +[2025-07-05 08:55:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:55:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:55:28] [Rank 0] PRINT: step:500/10000 train_loss:7.1250 val_loss:5.1766 train_time:33515ms step_avg:67.03ms +[2025-07-05 08:55:28] [Rank 0] PRINT: step:500/10000 train_loss:7.1250 val_loss:5.1766 train_time:33515ms step_avg:67.03ms +[2025-07-05 08:55:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:55:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..54410caa2ae371c5a5abf0f4287f222c33164bd9 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "18a69e13-7886-469a-825c-137d3e482df5", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44/training_log_18a69e13-7886-469a-825c-137d3e482df5.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44/training_log_18a69e13-7886-469a-825c-137d3e482df5.txt new file mode 100644 index 0000000000000000000000000000000000000000..e2799f0fb7eb4d23a84700eee9eb8e0358627a71 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44/training_log_18a69e13-7886-469a-825c-137d3e482df5.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:21:19] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:21:19 2025 --- +[2025-07-05 09:21:19] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:21:19 2025 --- +[2025-07-05 09:21:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:21:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:21:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:21:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:21:19] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 
09:21:19] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:21:19] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44 +[2025-07-05 09:21:19] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_44 +[2025-07-05 09:21:19] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:21:19] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
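+# Note: on the master process the run directory is rebuilt below from base_log_dir and a
+# folder name encoding optimizer_mode, model_parameterization, adam_lr and seed; config.json
+# and the per-run training log are then written into that directory.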
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
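+        # (everything after the '?' is treated as the answer; it is encoded below with a
+        # leading space so its first token matches how it follows the prompt during training)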
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
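+# train_loss_sum / train_step_count accumulate the detached per-step losses
+# (scaled by 1/train_seq_len in the training section) between validation points;
+# the running average is all-reduced across ranks and both counters are reset
+# after every validation report.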
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:21:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:21:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:21:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:21:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:21:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:21:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:21:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:21:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:21:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:21:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:21:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:21:22] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:21:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:21:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:21:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:21:22] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:21:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:21:22] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:21:22] [Rank 0] PRINT: Model returns: +[2025-07-05 09:21:22] [Rank 0] PRINT: Model returns: +[2025-07-05 09:21:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:21:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:21:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:21:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:21:22] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:21:22] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:21:22] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:21:22] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:21:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:21:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:21:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:21:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:21:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:21:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:21:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:21:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:22:26] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:22:26] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:22:26] [Rank 0] PRINT: Starting training... +[2025-07-05 09:22:26] [Rank 0] PRINT: Starting training... +[2025-07-05 09:22:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:22:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
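The configuration messages above show optimizer_mode 5 routing every attention and MLP matrix to Adam, so the Muon parameter list is empty and only one optimizer is created. A compact restatement of the script's mode-to-optimizer split as a lookup table; the table form and the function name split_matrices_by_mode are editorial, but the assignments follow the script's own mode descriptions (head, embedding, and scalar parameters always go to Adam regardless of mode):

def split_matrices_by_mode(mode, attn_qk, attn_vo, mlp_w1, mlp_w2):
    """Return (muon_matrices, adam_matrices) for optimizer_mode 0-8."""
    attn_all = attn_qk + attn_vo
    mlp_all = mlp_w1 + mlp_w2
    table = {
        0: (attn_all + mlp_all, []),              # Muon on all hidden matrices (original)
        1: (attn_qk, attn_vo + mlp_all),          # Muon(QK) / Adam(VO, MLP)
        2: (attn_vo, attn_qk + mlp_all),          # Muon(VO) / Adam(QK, MLP)
        3: (attn_all, mlp_all),                   # Muon(all attn) / Adam(MLP)
        4: (mlp_all, attn_all),                   # Muon(MLP) / Adam(all attn)
        5: ([], attn_all + mlp_all),              # all Adam; no Muon optimizer is created
        6: (mlp_w2, attn_all + mlp_w1),           # Muon(W_2 MLP) / Adam(attn, W_1 MLP)
        7: (attn_vo + mlp_all, attn_qk),          # Muon(VO, MLP) / Adam(QK)
        8: (attn_vo + mlp_w2, attn_qk + mlp_w1),  # Muon(VO, W_2 MLP) / Adam(QK, W_1 MLP)
    }
    return table[mode]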
+[2025-07-05 09:22:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:22:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:22:35] [Rank 0] step:21/10000 train_time:818ms step_avg:38.97ms +[2025-07-05 09:22:35] [Rank 0] step:21/10000 train_time:818ms step_avg:38.97ms +[2025-07-05 09:22:36] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.26ms +[2025-07-05 09:22:36] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.26ms +[2025-07-05 09:22:37] [Rank 0] step:61/10000 train_time:3466ms step_avg:56.82ms +[2025-07-05 09:22:37] [Rank 0] step:61/10000 train_time:3466ms step_avg:56.82ms +[2025-07-05 09:22:38] [Rank 0] step:81/10000 train_time:4791ms step_avg:59.15ms +[2025-07-05 09:22:38] [Rank 0] step:81/10000 train_time:4791ms step_avg:59.15ms +[2025-07-05 09:22:40] [Rank 0] step:101/10000 train_time:6114ms step_avg:60.54ms +[2025-07-05 09:22:40] [Rank 0] step:101/10000 train_time:6114ms step_avg:60.54ms +[2025-07-05 09:22:41] [Rank 0] step:121/10000 train_time:7437ms step_avg:61.46ms +[2025-07-05 09:22:41] [Rank 0] step:121/10000 train_time:7437ms step_avg:61.46ms +[2025-07-05 09:22:42] [Rank 0] step:141/10000 train_time:8760ms step_avg:62.13ms +[2025-07-05 09:22:42] [Rank 0] step:141/10000 train_time:8760ms step_avg:62.13ms +[2025-07-05 09:22:44] [Rank 0] step:161/10000 train_time:10083ms step_avg:62.62ms +[2025-07-05 09:22:44] [Rank 0] step:161/10000 train_time:10083ms step_avg:62.62ms +[2025-07-05 09:22:45] [Rank 0] step:181/10000 train_time:11454ms step_avg:63.28ms +[2025-07-05 09:22:45] [Rank 0] step:181/10000 train_time:11454ms step_avg:63.28ms +[2025-07-05 09:22:46] [Rank 0] step:201/10000 train_time:12792ms step_avg:63.64ms +[2025-07-05 09:22:46] [Rank 0] step:201/10000 train_time:12792ms step_avg:63.64ms +[2025-07-05 09:22:48] [Rank 0] step:221/10000 train_time:14118ms step_avg:63.88ms +[2025-07-05 09:22:48] [Rank 0] step:221/10000 train_time:14118ms step_avg:63.88ms +[2025-07-05 09:22:49] [Rank 0] step:241/10000 train_time:15444ms step_avg:64.08ms +[2025-07-05 09:22:49] [Rank 0] step:241/10000 train_time:15444ms step_avg:64.08ms +[2025-07-05 09:22:50] [Rank 0] step:261/10000 train_time:16772ms step_avg:64.26ms +[2025-07-05 09:22:50] [Rank 0] step:261/10000 train_time:16772ms step_avg:64.26ms +[2025-07-05 09:22:52] [Rank 0] step:281/10000 train_time:18100ms step_avg:64.41ms +[2025-07-05 09:22:52] [Rank 0] step:281/10000 train_time:18100ms step_avg:64.41ms +[2025-07-05 09:22:53] [Rank 0] step:301/10000 train_time:19428ms step_avg:64.54ms +[2025-07-05 09:22:53] [Rank 0] step:301/10000 train_time:19428ms step_avg:64.54ms +[2025-07-05 09:22:54] [Rank 0] step:321/10000 train_time:20758ms step_avg:64.67ms +[2025-07-05 09:22:54] [Rank 0] step:321/10000 train_time:20758ms step_avg:64.67ms +[2025-07-05 09:22:56] [Rank 0] step:341/10000 train_time:22090ms step_avg:64.78ms +[2025-07-05 09:22:56] [Rank 0] step:341/10000 train_time:22090ms step_avg:64.78ms +[2025-07-05 09:22:57] [Rank 0] step:361/10000 train_time:23679ms step_avg:65.59ms +[2025-07-05 09:22:57] [Rank 0] step:361/10000 train_time:23679ms step_avg:65.59ms +[2025-07-05 09:22:59] [Rank 0] step:381/10000 train_time:24830ms step_avg:65.17ms +[2025-07-05 09:22:59] [Rank 0] step:381/10000 train_time:24830ms step_avg:65.17ms +[2025-07-05 09:23:00] [Rank 0] step:401/10000 train_time:26170ms step_avg:65.26ms +[2025-07-05 09:23:00] [Rank 0] step:401/10000 train_time:26170ms step_avg:65.26ms +[2025-07-05 09:23:01] [Rank 0] step:421/10000 train_time:27509ms step_avg:65.34ms 
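Two of the figures in this log can be checked directly: step_avg is cumulative train_time divided by the step index, and the divisibility warning comes from flooring val_tokens / val_batch_size. A quick check using values from the config above; world_size = 4 is inferred from the warning's val_batch_size of 262144 and val_seq_len of 65536:

val_tokens = 1966080
val_seq_len = 65536
world_size = 4                                       # inferred: 262144 / 65536
val_batch_size = world_size * val_seq_len            # 262144, as quoted in the warning
val_num_steps = val_tokens // val_batch_size
print(val_num_steps)                                 # 7 -- the 0.5 leftover step is dropped
print(val_tokens - val_num_steps * val_batch_size)   # 131072 tokens skipped per evaluation
print(f"{27509 / 421:.2f}")                          # 65.34 ms, the logged step_avg at step 421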
+[2025-07-05 09:23:01] [Rank 0] step:421/10000 train_time:27509ms step_avg:65.34ms +[2025-07-05 09:23:03] [Rank 0] step:441/10000 train_time:28848ms step_avg:65.42ms +[2025-07-05 09:23:03] [Rank 0] step:441/10000 train_time:28848ms step_avg:65.42ms +[2025-07-05 09:23:04] [Rank 0] step:461/10000 train_time:30189ms step_avg:65.49ms +[2025-07-05 09:23:04] [Rank 0] step:461/10000 train_time:30189ms step_avg:65.49ms +[2025-07-05 09:23:05] [Rank 0] step:481/10000 train_time:31530ms step_avg:65.55ms +[2025-07-05 09:23:05] [Rank 0] step:481/10000 train_time:31530ms step_avg:65.55ms +[2025-07-05 09:23:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:23:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:23:07] [Rank 0] PRINT: step:500/10000 train_loss:7.0864 val_loss:5.0525 train_time:33483ms step_avg:66.97ms +[2025-07-05 09:23:07] [Rank 0] PRINT: step:500/10000 train_loss:7.0864 val_loss:5.0525 train_time:33483ms step_avg:66.97ms +[2025-07-05 09:23:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:23:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7009bfb63feec82221ab23cc903e1460d110b31c --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e1255aeb-e32d-4ca7-96ae-042c22f04bdb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/training_log_e1255aeb-e32d-4ca7-96ae-042c22f04bdb.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/training_log_e1255aeb-e32d-4ca7-96ae-042c22f04bdb.txt new file mode 100644 index 0000000000000000000000000000000000000000..45902c856ec1608ac5ca768880a19602111b98cb --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/training_log_e1255aeb-e32d-4ca7-96ae-042c22f04bdb.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:48:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:48:21 2025 --- +[2025-07-05 09:48:21] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:48:21 2025 --- +[2025-07-05 09:48:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:48:21] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:48:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:48:21] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:48:21] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 
09:48:21] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:48:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45 +[2025-07-05 09:48:21] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45 +[2025-07-05 09:48:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:48:21] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:48:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:48:21] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:48:23] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:48:23] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:48:23] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:48:24] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-05 09:48:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:48:24] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:48:24] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:48:24] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-05 09:48:24] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:48:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 09:48:24] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005).
+[2025-07-05 09:48:24] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 09:48:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 09:48:24] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:48:24] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:48:24] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:49:27] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:49:27] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:49:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:49:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:49:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:49:36] [Rank 0] step:21/10000 train_time:817ms step_avg:38.93ms +[2025-07-05 09:49:36] [Rank 0] step:21/10000 train_time:817ms step_avg:38.93ms +[2025-07-05 09:49:37] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.26ms +[2025-07-05 09:49:37] [Rank 0] step:41/10000 train_time:2143ms step_avg:52.26ms +[2025-07-05 09:49:38] [Rank 0] step:61/10000 train_time:3471ms step_avg:56.90ms +[2025-07-05 09:49:38] [Rank 0] step:61/10000 train_time:3471ms step_avg:56.90ms +[2025-07-05 09:49:40] [Rank 0] step:81/10000 train_time:4799ms step_avg:59.24ms +[2025-07-05 09:49:40] [Rank 0] step:81/10000 train_time:4799ms step_avg:59.24ms +[2025-07-05 09:49:41] [Rank 0] step:101/10000 train_time:6124ms step_avg:60.64ms +[2025-07-05 09:49:41] [Rank 0] step:101/10000 train_time:6124ms step_avg:60.64ms +[2025-07-05 09:49:42] [Rank 0] step:121/10000 train_time:7553ms step_avg:62.42ms +[2025-07-05 09:49:42] [Rank 0] step:121/10000 train_time:7553ms step_avg:62.42ms +[2025-07-05 09:49:44] [Rank 0] step:141/10000 train_time:8880ms step_avg:62.98ms +[2025-07-05 09:49:44] [Rank 0] step:141/10000 train_time:8880ms step_avg:62.98ms +[2025-07-05 09:49:45] [Rank 0] step:161/10000 train_time:10206ms step_avg:63.39ms +[2025-07-05 09:49:45] [Rank 0] step:161/10000 train_time:10206ms step_avg:63.39ms +[2025-07-05 09:49:46] [Rank 0] step:181/10000 train_time:11578ms step_avg:63.97ms +[2025-07-05 09:49:46] [Rank 0] step:181/10000 train_time:11578ms step_avg:63.97ms +[2025-07-05 09:49:48] [Rank 0] step:201/10000 train_time:12922ms step_avg:64.29ms +[2025-07-05 09:49:48] [Rank 0] step:201/10000 train_time:12922ms step_avg:64.29ms +[2025-07-05 09:49:49] [Rank 0] step:221/10000 train_time:14248ms step_avg:64.47ms +[2025-07-05 09:49:49] [Rank 0] step:221/10000 train_time:14248ms step_avg:64.47ms +[2025-07-05 09:49:50] [Rank 0] step:241/10000 train_time:15676ms step_avg:65.04ms +[2025-07-05 09:49:50] [Rank 0] step:241/10000 train_time:15676ms step_avg:65.04ms +[2025-07-05 09:49:52] [Rank 0] step:261/10000 train_time:17003ms step_avg:65.14ms +[2025-07-05 09:49:52] [Rank 0] step:261/10000 train_time:17003ms step_avg:65.14ms +[2025-07-05 09:49:53] [Rank 0] step:281/10000 train_time:18332ms step_avg:65.24ms +[2025-07-05 09:49:53] [Rank 0] step:281/10000 train_time:18332ms step_avg:65.24ms +[2025-07-05 09:49:54] [Rank 0] step:301/10000 train_time:19662ms step_avg:65.32ms +[2025-07-05 09:49:54] [Rank 0] step:301/10000 train_time:19662ms step_avg:65.32ms +[2025-07-05 09:49:56] [Rank 0] step:321/10000 train_time:20993ms step_avg:65.40ms +[2025-07-05 09:49:56] [Rank 0] step:321/10000 train_time:20993ms step_avg:65.40ms +[2025-07-05 09:49:57] [Rank 0] step:341/10000 train_time:22373ms step_avg:65.61ms +[2025-07-05 09:49:57] [Rank 0] step:341/10000 train_time:22373ms step_avg:65.61ms +[2025-07-05 09:49:59] [Rank 0] step:361/10000 train_time:23711ms step_avg:65.68ms +[2025-07-05 09:49:59] [Rank 0] step:361/10000 train_time:23711ms step_avg:65.68ms +[2025-07-05 09:50:00] [Rank 0] step:381/10000 train_time:25088ms step_avg:65.85ms +[2025-07-05 09:50:00] [Rank 0] step:381/10000 train_time:25088ms step_avg:65.85ms +[2025-07-05 09:50:01] [Rank 0] step:401/10000 train_time:26428ms step_avg:65.90ms +[2025-07-05 09:50:01] [Rank 0] step:401/10000 train_time:26428ms step_avg:65.90ms +[2025-07-05 09:50:03] [Rank 0] step:421/10000 train_time:27766ms step_avg:65.95ms 
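The step_avg figures above are just the cumulative train_time divided by the displayed step number, matching the print statement in the training loop; a quick reproduction from the logged values:

for step_shown, train_time_ms in [(21, 817), (41, 2143), (401, 26428)]:
    print(step_shown, round(train_time_ms / step_shown, 2))
# prints 38.9, 52.27, 65.91 -- the logged 38.93 / 52.26 / 65.90 differ only because train_time is rounded to whole ms before printing.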
+[2025-07-05 09:50:03] [Rank 0] step:421/10000 train_time:27766ms step_avg:65.95ms +[2025-07-05 09:50:04] [Rank 0] step:441/10000 train_time:29106ms step_avg:66.00ms +[2025-07-05 09:50:04] [Rank 0] step:441/10000 train_time:29106ms step_avg:66.00ms +[2025-07-05 09:50:05] [Rank 0] step:461/10000 train_time:30446ms step_avg:66.04ms +[2025-07-05 09:50:05] [Rank 0] step:461/10000 train_time:30446ms step_avg:66.04ms +[2025-07-05 09:50:07] [Rank 0] step:481/10000 train_time:31787ms step_avg:66.09ms +[2025-07-05 09:50:07] [Rank 0] step:481/10000 train_time:31787ms step_avg:66.09ms +[2025-07-05 09:50:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:50:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:50:09] [Rank 0] PRINT: step:500/10000 train_loss:7.0638 val_loss:4.9388 train_time:33737ms step_avg:67.47ms +[2025-07-05 09:50:09] [Rank 0] PRINT: step:500/10000 train_loss:7.0638 val_loss:4.9388 train_time:33737ms step_avg:67.47ms +[2025-07-05 09:50:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:50:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..aab659ff7965092f7b34e3b7f55a6f522d39e2b5 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "73a69a0c-2589-4a94-b258-86c8d2873f8f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46/training_log_73a69a0c-2589-4a94-b258-86c8d2873f8f.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46/training_log_73a69a0c-2589-4a94-b258-86c8d2873f8f.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c4feb952ee954765357c9afd8b5e5d787add6bd --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46/training_log_73a69a0c-2589-4a94-b258-86c8d2873f8f.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:15:35] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:15:35 2025 --- +[2025-07-05 10:15:35] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:15:35 2025 --- +[2025-07-05 10:15:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:15:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:15:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:15:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:15:35] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 
10:15:35] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:15:35] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46 +[2025-07-05 10:15:35] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_46 +[2025-07-05 10:15:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
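# A worked check of the get_lr schedule defined above, assuming num_iterations = 10000 and
# cooldown_frac = 0.8 (the values recorded in config.json):
#   step < 2000  -> x < 0.2 = 1 - cooldown_frac, multiplier 1.0 (constant phase)
#   step = 6000  -> x = 0.6, w = (1 - 0.6) / 0.8 = 0.5, multiplier = 0.5 * 1.0 + 0.5 * 0.1 = 0.55
#   step = 10000 -> x = 1.0, w = 0.0, multiplier = 0.1 (end of the linear cooldown)
# The training loop multiplies every optimizer group's initial_lr by this factor at each step.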
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:15:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:15:36] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:15:36] [Rank 0] PRINT: Constructing model...
+[2025-07-05 10:15:38] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 10:15:38] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 10:15:38] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 10:15:38] [Rank 0] PRINT: Model test - Result type:
+[2025-07-05 10:15:38] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 10:15:38] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 10:15:38] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 10:15:39] [Rank 0] PRINT: Model returns:
+[2025-07-05 10:15:39] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 10:15:39] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 10:15:39] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005).
+[2025-07-05 10:15:39] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 10:15:39] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 10:15:39] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 10:15:39] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 10:15:39] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 10:16:42] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 10:16:42] [Rank 0] PRINT: Starting training...
+[2025-07-05 10:16:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 10:16:49] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 10:16:50] [Rank 0] step:21/10000 train_time:719ms step_avg:34.24ms
+[2025-07-05 10:16:52] [Rank 0] step:41/10000 train_time:2044ms step_avg:49.86ms
+[2025-07-05 10:16:53] [Rank 0] step:61/10000 train_time:3370ms step_avg:55.25ms
+[2025-07-05 10:16:54] [Rank 0] step:81/10000 train_time:4698ms step_avg:58.00ms
+[2025-07-05 10:16:56] [Rank 0] step:101/10000 train_time:6023ms step_avg:59.64ms
+[2025-07-05 10:16:57] [Rank 0] step:121/10000 train_time:7351ms step_avg:60.75ms
+[2025-07-05 10:16:58] [Rank 0] step:141/10000 train_time:8679ms step_avg:61.55ms
+[2025-07-05 10:17:00] [Rank 0] step:161/10000 train_time:10006ms step_avg:62.15ms
+[2025-07-05 10:17:01] [Rank 0] step:181/10000 train_time:11336ms step_avg:62.63ms
+[2025-07-05 10:17:02] [Rank 0] step:201/10000 train_time:12720ms step_avg:63.29ms
+[2025-07-05 10:17:04] [Rank 0] step:221/10000 train_time:14051ms step_avg:63.58ms
+[2025-07-05 10:17:05] [Rank 0] step:241/10000 train_time:15382ms step_avg:63.83ms
+[2025-07-05 10:17:06] [Rank 0] step:261/10000 train_time:16716ms step_avg:64.05ms
+[2025-07-05 10:17:08] [Rank 0] step:281/10000 train_time:18051ms step_avg:64.24ms
+[2025-07-05 10:17:09] [Rank 0] step:301/10000 train_time:19385ms step_avg:64.40ms
+[2025-07-05 10:17:10] [Rank 0] step:321/10000 train_time:20720ms step_avg:64.55ms
+[2025-07-05 10:17:12] [Rank 0] step:341/10000 train_time:22057ms step_avg:64.68ms
+[2025-07-05 10:17:13] [Rank 0] step:361/10000 train_time:23441ms step_avg:64.93ms
+[2025-07-05 10:17:14] [Rank 0] step:381/10000 train_time:24788ms step_avg:65.06ms
+[2025-07-05 10:17:16] [Rank 0] step:401/10000 train_time:26127ms step_avg:65.16ms
+[2025-07-05 10:17:17] [Rank 0] step:421/10000 train_time:27468ms step_avg:65.25ms +[2025-07-05 10:17:18] [Rank 0] step:441/10000 train_time:28810ms step_avg:65.33ms +[2025-07-05 10:17:18] [Rank 0] step:441/10000 train_time:28810ms step_avg:65.33ms +[2025-07-05 10:17:20] [Rank 0] step:461/10000 train_time:30208ms step_avg:65.53ms +[2025-07-05 10:17:20] [Rank 0] step:461/10000 train_time:30208ms step_avg:65.53ms +[2025-07-05 10:17:21] [Rank 0] step:481/10000 train_time:31652ms step_avg:65.80ms +[2025-07-05 10:17:21] [Rank 0] step:481/10000 train_time:31652ms step_avg:65.80ms +[2025-07-05 10:17:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:17:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:17:24] [Rank 0] PRINT: step:500/10000 train_loss:7.0548 val_loss:4.9446 train_time:33644ms step_avg:67.29ms +[2025-07-05 10:17:24] [Rank 0] PRINT: step:500/10000 train_loss:7.0548 val_loss:4.9446 train_time:33644ms step_avg:67.29ms +[2025-07-05 10:17:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:17:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ddc66cabb647b8e7cacc9dd6e26099f116b868c5 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "5c77be8b-cfa4-425e-87f1-5842eb1b8455", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47/training_log_5c77be8b-cfa4-425e-87f1-5842eb1b8455.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47/training_log_5c77be8b-cfa4-425e-87f1-5842eb1b8455.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca8ffd0149a32d2cf221cde2d578b7ad896f23d4 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47/training_log_5c77be8b-cfa4-425e-87f1-5842eb1b8455.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:35:04] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:35:04 2025 --- +[2025-07-05 08:35:04] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:35:04 2025 --- +[2025-07-05 08:35:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:35:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 08:35:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:35:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:35:04] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 
08:35:04] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:35:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47 +[2025-07-05 08:35:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_47 +[2025-07-05 08:35:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write the message to the log file once (the earlier duplicate write caused every log line to appear twice) + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:35:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
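+# Illustrative launch sketch (not from this log; script filename and GPU count are assumptions):
+# RANK, LOCAL_RANK and WORLD_SIZE read above are the variables torchrun exports, so a single-node
+# run of this script would look roughly like:
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qkvo.py --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42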
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
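+# Worked example (illustrative, not part of the original code): with num_iterations=10000 and
+# cooldown_frac=0.8, the get_lr schedule defined above returns a multiplier of 1.0 for steps 0-1999
+# and then decays linearly toward 0.1 of each group's initial_lr; e.g. at step 6000, x=0.6,
+# w=(1-0.6)/0.8=0.5, so the multiplier is 0.5*1.0 + 0.5*0.1 = 0.55.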
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:35:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:35:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:35:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:35:04] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:35:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:35:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:35:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:35:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:35:07] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:35:07] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:35:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:35:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:35:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:35:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:35:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:35:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:35:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:35:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:35:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:35:07] [Rank 0] PRINT: Model returns: +[2025-07-05 08:35:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:35:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:35:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:35:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:35:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 08:35:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 08:35:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:35:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:35:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:35:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:35:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:35:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:35:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:35:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:35:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:35:07] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:36:10] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:36:10] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:36:10] [Rank 0] PRINT: Starting training... +[2025-07-05 08:36:10] [Rank 0] PRINT: Starting training... +[2025-07-05 08:36:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:36:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:36:17] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:36:17] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:36:19] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.66ms +[2025-07-05 08:36:19] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.66ms +[2025-07-05 08:36:20] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.23ms +[2025-07-05 08:36:20] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.23ms +[2025-07-05 08:36:22] [Rank 0] step:61/10000 train_time:3672ms step_avg:60.19ms +[2025-07-05 08:36:22] [Rank 0] step:61/10000 train_time:3672ms step_avg:60.19ms +[2025-07-05 08:36:23] [Rank 0] step:81/10000 train_time:4997ms step_avg:61.69ms +[2025-07-05 08:36:23] [Rank 0] step:81/10000 train_time:4997ms step_avg:61.69ms +[2025-07-05 08:36:24] [Rank 0] step:101/10000 train_time:6322ms step_avg:62.60ms +[2025-07-05 08:36:24] [Rank 0] step:101/10000 train_time:6322ms step_avg:62.60ms +[2025-07-05 08:36:26] [Rank 0] step:121/10000 train_time:7649ms step_avg:63.21ms +[2025-07-05 08:36:26] [Rank 0] step:121/10000 train_time:7649ms step_avg:63.21ms +[2025-07-05 08:36:27] [Rank 0] step:141/10000 train_time:8976ms step_avg:63.66ms +[2025-07-05 08:36:27] [Rank 0] step:141/10000 train_time:8976ms step_avg:63.66ms +[2025-07-05 08:36:28] [Rank 0] step:161/10000 train_time:10303ms step_avg:64.00ms +[2025-07-05 08:36:28] [Rank 0] step:161/10000 train_time:10303ms step_avg:64.00ms +[2025-07-05 08:36:30] [Rank 0] step:181/10000 train_time:12291ms step_avg:67.91ms +[2025-07-05 08:36:30] [Rank 0] step:181/10000 train_time:12291ms step_avg:67.91ms +[2025-07-05 08:36:31] [Rank 0] step:201/10000 train_time:13006ms step_avg:64.71ms +[2025-07-05 08:36:31] [Rank 0] step:201/10000 train_time:13006ms step_avg:64.71ms +[2025-07-05 08:36:32] [Rank 0] step:221/10000 train_time:14333ms step_avg:64.86ms +[2025-07-05 08:36:32] [Rank 0] step:221/10000 train_time:14333ms step_avg:64.86ms +[2025-07-05 08:36:34] [Rank 0] step:241/10000 train_time:15661ms step_avg:64.98ms +[2025-07-05 08:36:34] [Rank 0] step:241/10000 train_time:15661ms step_avg:64.98ms +[2025-07-05 08:36:35] [Rank 0] step:261/10000 train_time:16989ms step_avg:65.09ms +[2025-07-05 08:36:35] [Rank 0] step:261/10000 train_time:16989ms step_avg:65.09ms +[2025-07-05 08:36:36] [Rank 0] step:281/10000 train_time:18318ms step_avg:65.19ms +[2025-07-05 08:36:36] [Rank 0] step:281/10000 train_time:18318ms step_avg:65.19ms +[2025-07-05 08:36:38] [Rank 0] step:301/10000 train_time:19649ms step_avg:65.28ms +[2025-07-05 08:36:38] [Rank 0] step:301/10000 train_time:19649ms step_avg:65.28ms +[2025-07-05 08:36:39] [Rank 0] step:321/10000 train_time:20980ms step_avg:65.36ms +[2025-07-05 08:36:39] [Rank 0] step:321/10000 train_time:20980ms step_avg:65.36ms +[2025-07-05 08:36:40] [Rank 0] step:341/10000 train_time:22314ms step_avg:65.44ms +[2025-07-05 08:36:40] [Rank 0] step:341/10000 train_time:22314ms step_avg:65.44ms +[2025-07-05 08:36:42] [Rank 0] step:361/10000 train_time:23903ms step_avg:66.21ms +[2025-07-05 08:36:42] [Rank 0] step:361/10000 train_time:23903ms step_avg:66.21ms +[2025-07-05 08:36:43] [Rank 0] step:381/10000 train_time:25043ms step_avg:65.73ms +[2025-07-05 08:36:43] [Rank 0] step:381/10000 train_time:25043ms step_avg:65.73ms +[2025-07-05 08:36:44] [Rank 0] step:401/10000 train_time:26380ms step_avg:65.79ms +[2025-07-05 08:36:44] [Rank 0] step:401/10000 train_time:26380ms step_avg:65.79ms +[2025-07-05 08:36:46] [Rank 0] step:421/10000 train_time:27717ms step_avg:65.84ms 
+[2025-07-05 08:36:46] [Rank 0] step:421/10000 train_time:27717ms step_avg:65.84ms +[2025-07-05 08:36:47] [Rank 0] step:441/10000 train_time:29056ms step_avg:65.89ms +[2025-07-05 08:36:47] [Rank 0] step:441/10000 train_time:29056ms step_avg:65.89ms +[2025-07-05 08:36:48] [Rank 0] step:461/10000 train_time:30395ms step_avg:65.93ms +[2025-07-05 08:36:48] [Rank 0] step:461/10000 train_time:30395ms step_avg:65.93ms +[2025-07-05 08:36:50] [Rank 0] step:481/10000 train_time:31735ms step_avg:65.98ms +[2025-07-05 08:36:50] [Rank 0] step:481/10000 train_time:31735ms step_avg:65.98ms +[2025-07-05 08:36:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:36:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:36:52] [Rank 0] PRINT: step:500/10000 train_loss:7.0278 val_loss:4.8139 train_time:33686ms step_avg:67.37ms +[2025-07-05 08:36:52] [Rank 0] PRINT: step:500/10000 train_loss:7.0278 val_loss:4.8139 train_time:33686ms step_avg:67.37ms +[2025-07-05 08:36:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:36:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ea10dfe0f094d2691666f531fcbda515e6b96ac4 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "60946efe-8970-4c7a-9264-c7f7364e2241", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/training_log_60946efe-8970-4c7a-9264-c7f7364e2241.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/training_log_60946efe-8970-4c7a-9264-c7f7364e2241.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb9ebab6ebf965c19823a81b413ff011b7282291 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/training_log_60946efe-8970-4c7a-9264-c7f7364e2241.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:02:27] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:02:27 2025 --- +[2025-07-05 09:02:27] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:02:27 2025 --- +[2025-07-05 09:02:27] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:02:27] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:02:27] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:02:27] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:02:27] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 
09:02:27] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 09:02:27] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48 +[2025-07-05 09:02:27] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48 +[2025-07-05 09:02:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
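+# Note on the loader call above (a rough sketch of the per-step token budget; the actual
+# world_size depends on the launch config, e.g. 8 GPUs is only an assumption here):
+#   global batch  = world_size * train_seq_len tokens per step (e.g. 8 * 12288 = 98304)
+#   per-rank view = local_batch_size = batch_size // world_size = train_seq_len = 12288 tokens,
+#   from which distributed_data_generator yields a next-token-shifted (inputs, targets) pair.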
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:02:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
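+# For the CLI args recorded at the top of this log (optimizer_mode=5, model_parameterization='qkvo',
+# adam_lr=0.0005, seed=48), the f-string above resolves to the run folder
+# "mode_5_param_qkvo_lr_0.0005_seed_48", matching this run's directory and config.json path.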
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
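+# Editor's sketch (added for clarity; not part of the original logged script): with
+# num_iterations = 10000 and cooldown_frac = 0.8, get_lr(step) defined above returns 1.0
+# while step/num_iterations < 0.2 (roughly the first 2000 steps) and then decays linearly
+# toward 0.1, e.g. get_lr(2000) = 1.0, get_lr(6000) = 0.5 + 0.5*0.1 = 0.55, and
+# get_lr(10000) = 0.1; the training loop multiplies each param group's "initial_lr" by
+# this factor at every step.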
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:02:27] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:02:27] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:02:28] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:02:28] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:02:29] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:02:29] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:02:29] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:02:29] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:02:30] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:02:30] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:02:30] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:02:30] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:02:30] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:02:30] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:02:30] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:02:30] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:02:30] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:02:30] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:02:30] [Rank 0] PRINT: Model returns: +[2025-07-05 09:02:30] [Rank 0] PRINT: Model returns: +[2025-07-05 09:02:30] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:02:30] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:02:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:02:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:02:30] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:02:30] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:02:31] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:02:31] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:02:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:02:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:02:31] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:02:31] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:02:31] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:02:31] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:02:31] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:02:31] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:03:34] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:03:34] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:03:35] [Rank 0] PRINT: Starting training... +[2025-07-05 09:03:35] [Rank 0] PRINT: Starting training... +[2025-07-05 09:03:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:03:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
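+Editor's note (added for context; not part of the original log): the divisibility warning above follows directly from the configured values. With val_seq_len = 65536, a reported val_batch_size of 262144 implies world_size = 4 (4 * 65536 = 262144), so
+    val_num_steps = val_tokens // val_batch_size = 1966080 // 262144 = 7
+and the remaining half batch (131072 tokens) of the 7.5-batch validation set is skipped at each evaluation, which is exactly what the warning reports.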
+[2025-07-05 09:03:42] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:03:42] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:03:43] [Rank 0] step:21/10000 train_time:1019ms step_avg:48.52ms +[2025-07-05 09:03:43] [Rank 0] step:21/10000 train_time:1019ms step_avg:48.52ms +[2025-07-05 09:03:45] [Rank 0] step:41/10000 train_time:2341ms step_avg:57.11ms +[2025-07-05 09:03:45] [Rank 0] step:41/10000 train_time:2341ms step_avg:57.11ms +[2025-07-05 09:03:46] [Rank 0] step:61/10000 train_time:3665ms step_avg:60.08ms +[2025-07-05 09:03:46] [Rank 0] step:61/10000 train_time:3665ms step_avg:60.08ms +[2025-07-05 09:03:47] [Rank 0] step:81/10000 train_time:4988ms step_avg:61.58ms +[2025-07-05 09:03:47] [Rank 0] step:81/10000 train_time:4988ms step_avg:61.58ms +[2025-07-05 09:03:49] [Rank 0] step:101/10000 train_time:6312ms step_avg:62.49ms +[2025-07-05 09:03:49] [Rank 0] step:101/10000 train_time:6312ms step_avg:62.49ms +[2025-07-05 09:03:50] [Rank 0] step:121/10000 train_time:7635ms step_avg:63.10ms +[2025-07-05 09:03:50] [Rank 0] step:121/10000 train_time:7635ms step_avg:63.10ms +[2025-07-05 09:03:51] [Rank 0] step:141/10000 train_time:8959ms step_avg:63.54ms +[2025-07-05 09:03:51] [Rank 0] step:141/10000 train_time:8959ms step_avg:63.54ms +[2025-07-05 09:03:53] [Rank 0] step:161/10000 train_time:10282ms step_avg:63.86ms +[2025-07-05 09:03:53] [Rank 0] step:161/10000 train_time:10282ms step_avg:63.86ms +[2025-07-05 09:03:54] [Rank 0] step:181/10000 train_time:11657ms step_avg:64.40ms +[2025-07-05 09:03:54] [Rank 0] step:181/10000 train_time:11657ms step_avg:64.40ms +[2025-07-05 09:03:55] [Rank 0] step:201/10000 train_time:12988ms step_avg:64.62ms +[2025-07-05 09:03:55] [Rank 0] step:201/10000 train_time:12988ms step_avg:64.62ms +[2025-07-05 09:03:57] [Rank 0] step:221/10000 train_time:14313ms step_avg:64.77ms +[2025-07-05 09:03:57] [Rank 0] step:221/10000 train_time:14313ms step_avg:64.77ms +[2025-07-05 09:03:58] [Rank 0] step:241/10000 train_time:15639ms step_avg:64.89ms +[2025-07-05 09:03:58] [Rank 0] step:241/10000 train_time:15639ms step_avg:64.89ms +[2025-07-05 09:03:59] [Rank 0] step:261/10000 train_time:16965ms step_avg:65.00ms +[2025-07-05 09:03:59] [Rank 0] step:261/10000 train_time:16965ms step_avg:65.00ms +[2025-07-05 09:04:01] [Rank 0] step:281/10000 train_time:18292ms step_avg:65.10ms +[2025-07-05 09:04:01] [Rank 0] step:281/10000 train_time:18292ms step_avg:65.10ms +[2025-07-05 09:04:02] [Rank 0] step:301/10000 train_time:19619ms step_avg:65.18ms +[2025-07-05 09:04:02] [Rank 0] step:301/10000 train_time:19619ms step_avg:65.18ms +[2025-07-05 09:04:03] [Rank 0] step:321/10000 train_time:20947ms step_avg:65.25ms +[2025-07-05 09:04:03] [Rank 0] step:321/10000 train_time:20947ms step_avg:65.25ms +[2025-07-05 09:04:05] [Rank 0] step:341/10000 train_time:22275ms step_avg:65.32ms +[2025-07-05 09:04:05] [Rank 0] step:341/10000 train_time:22275ms step_avg:65.32ms +[2025-07-05 09:04:06] [Rank 0] step:361/10000 train_time:23858ms step_avg:66.09ms +[2025-07-05 09:04:06] [Rank 0] step:361/10000 train_time:23858ms step_avg:66.09ms +[2025-07-05 09:04:07] [Rank 0] step:381/10000 train_time:24994ms step_avg:65.60ms +[2025-07-05 09:04:07] [Rank 0] step:381/10000 train_time:24994ms step_avg:65.60ms +[2025-07-05 09:04:09] [Rank 0] step:401/10000 train_time:26329ms step_avg:65.66ms +[2025-07-05 09:04:09] [Rank 0] step:401/10000 train_time:26329ms step_avg:65.66ms +[2025-07-05 09:04:10] [Rank 0] step:421/10000 train_time:27667ms step_avg:65.72ms 
+[2025-07-05 09:04:10] [Rank 0] step:421/10000 train_time:27667ms step_avg:65.72ms +[2025-07-05 09:04:11] [Rank 0] step:441/10000 train_time:29003ms step_avg:65.77ms +[2025-07-05 09:04:11] [Rank 0] step:441/10000 train_time:29003ms step_avg:65.77ms +[2025-07-05 09:04:13] [Rank 0] step:461/10000 train_time:30344ms step_avg:65.82ms +[2025-07-05 09:04:13] [Rank 0] step:461/10000 train_time:30344ms step_avg:65.82ms +[2025-07-05 09:04:14] [Rank 0] step:481/10000 train_time:31683ms step_avg:65.87ms +[2025-07-05 09:04:14] [Rank 0] step:481/10000 train_time:31683ms step_avg:65.87ms +[2025-07-05 09:04:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:04:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:04:16] [Rank 0] PRINT: step:500/10000 train_loss:7.1166 val_loss:5.1856 train_time:33634ms step_avg:67.27ms +[2025-07-05 09:04:16] [Rank 0] PRINT: step:500/10000 train_loss:7.1166 val_loss:5.1856 train_time:33634ms step_avg:67.27ms +[2025-07-05 09:04:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:04:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6c2a025f82ddf2bcc63232eb0d7fc973da828a5b --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "fae80918-d628-4df9-826e-82b840691456", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49/training_log_fae80918-d628-4df9-826e-82b840691456.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49/training_log_fae80918-d628-4df9-826e-82b840691456.txt new file mode 100644 index 0000000000000000000000000000000000000000..c08ca4522d105e3e724bc64fb676d3698eecaef6 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49/training_log_fae80918-d628-4df9-826e-82b840691456.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:29:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:29:57 2025 --- +[2025-07-05 09:29:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:29:57 2025 --- +[2025-07-05 09:29:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:29:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:29:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:29:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:29:57] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 
09:29:57] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:29:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49 +[2025-07-05 09:29:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_49 +[2025-07-05 09:29:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
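+    Class groups come from generate_powerlaw_selection_counts(m_val), the same grouping used by run_detailed_evaluation above.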
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
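+# [Editorial note, hedged illustration -- not part of the original run] The get_lr schedule defined
+# above holds the LR multiplier at 1.0 for the first (1 - cooldown_frac) fraction of training, then
+# decays it linearly to 0.1 at the final step. With num_iterations=10000 and cooldown_frac=0.8 this
+# gives roughly: step 0 -> 1.0, step 2000 -> 1.0 (end of stable phase), step 6000 -> 0.55,
+# step 10000 -> 0.1. A minimal sanity probe of those values (pure Python, no side effects):
+for _probe_step, _expected_mult in [(0, 1.0), (2000, 1.0), (6000, 0.55), (10000, 0.1)]:
+    assert abs(get_lr(_probe_step) - _expected_mult) < 1e-6, (_probe_step, get_lr(_probe_step))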
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:29:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
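+# [Editorial note, hedged illustration -- not executed in the original run] The run directory name is
+# built purely from the CLI arguments, so every (optimizer_mode, parameterization, adam_lr, seed)
+# combination gets its own folder. For the run recorded in this log (mode 5, 'qkvo', lr 0.0005,
+# seed 49) the pattern reproduces the folder name visible in the paths above:
+_example_run_folder = f"mode_{5}_param_{'qkvo'}_lr_{0.0005}_seed_{49}"
+assert _example_run_folder == "mode_5_param_qkvo_lr_0.0005_seed_49"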
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
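+        # [Editorial note, hypothetical example] A cleaned item such as
+        # "What university did Alice Smith attend? MIT" splits into
+        # prompt = "What university did Alice Smith attend?" and answer = "MIT";
+        # the expected first token is then taken from " MIT" (with the leading space),
+        # matching how the answer follows the question in the training data.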
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:29:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:29:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:29:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:29:57] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:29:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:29:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:29:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:29:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:29:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:29:59] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:30:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:30:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:30:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:30:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:30:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:30:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:30:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:30:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:30:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:30:00] [Rank 0] PRINT: Model returns: +[2025-07-05 09:30:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:30:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:30:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:30:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:30:00] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:30:00] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:30:00] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:30:00] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:30:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:30:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:30:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:30:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:30:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:30:00] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:30:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:30:00] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:31:03] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:31:03] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:31:03] [Rank 0] PRINT: Starting training... +[2025-07-05 09:31:03] [Rank 0] PRINT: Starting training... +[2025-07-05 09:31:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:31:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:31:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:31:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:31:12] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.59ms +[2025-07-05 09:31:12] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.59ms +[2025-07-05 09:31:13] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.27ms +[2025-07-05 09:31:13] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.27ms +[2025-07-05 09:31:15] [Rank 0] step:61/10000 train_time:3673ms step_avg:60.22ms +[2025-07-05 09:31:15] [Rank 0] step:61/10000 train_time:3673ms step_avg:60.22ms +[2025-07-05 09:31:16] [Rank 0] step:81/10000 train_time:5000ms step_avg:61.73ms +[2025-07-05 09:31:16] [Rank 0] step:81/10000 train_time:5000ms step_avg:61.73ms +[2025-07-05 09:31:17] [Rank 0] step:101/10000 train_time:6326ms step_avg:62.63ms +[2025-07-05 09:31:17] [Rank 0] step:101/10000 train_time:6326ms step_avg:62.63ms +[2025-07-05 09:31:19] [Rank 0] step:121/10000 train_time:7651ms step_avg:63.23ms +[2025-07-05 09:31:19] [Rank 0] step:121/10000 train_time:7651ms step_avg:63.23ms +[2025-07-05 09:31:20] [Rank 0] step:141/10000 train_time:8976ms step_avg:63.66ms +[2025-07-05 09:31:20] [Rank 0] step:141/10000 train_time:8976ms step_avg:63.66ms +[2025-07-05 09:31:21] [Rank 0] step:161/10000 train_time:10303ms step_avg:63.99ms +[2025-07-05 09:31:21] [Rank 0] step:161/10000 train_time:10303ms step_avg:63.99ms +[2025-07-05 09:31:23] [Rank 0] step:181/10000 train_time:12309ms step_avg:68.01ms +[2025-07-05 09:31:23] [Rank 0] step:181/10000 train_time:12309ms step_avg:68.01ms +[2025-07-05 09:31:24] [Rank 0] step:201/10000 train_time:13026ms step_avg:64.80ms +[2025-07-05 09:31:24] [Rank 0] step:201/10000 train_time:13026ms step_avg:64.80ms +[2025-07-05 09:31:25] [Rank 0] step:221/10000 train_time:14356ms step_avg:64.96ms +[2025-07-05 09:31:25] [Rank 0] step:221/10000 train_time:14356ms step_avg:64.96ms +[2025-07-05 09:31:27] [Rank 0] step:241/10000 train_time:15685ms step_avg:65.08ms +[2025-07-05 09:31:27] [Rank 0] step:241/10000 train_time:15685ms step_avg:65.08ms +[2025-07-05 09:31:28] [Rank 0] step:261/10000 train_time:17016ms step_avg:65.20ms +[2025-07-05 09:31:28] [Rank 0] step:261/10000 train_time:17016ms step_avg:65.20ms +[2025-07-05 09:31:29] [Rank 0] step:281/10000 train_time:18348ms step_avg:65.30ms +[2025-07-05 09:31:29] [Rank 0] step:281/10000 train_time:18348ms step_avg:65.30ms +[2025-07-05 09:31:31] [Rank 0] step:301/10000 train_time:19679ms step_avg:65.38ms +[2025-07-05 09:31:31] [Rank 0] step:301/10000 train_time:19679ms step_avg:65.38ms +[2025-07-05 09:31:32] [Rank 0] step:321/10000 train_time:21011ms step_avg:65.46ms +[2025-07-05 09:31:32] [Rank 0] step:321/10000 train_time:21011ms step_avg:65.46ms +[2025-07-05 09:31:33] [Rank 0] step:341/10000 train_time:22350ms step_avg:65.54ms +[2025-07-05 09:31:33] [Rank 0] step:341/10000 train_time:22350ms step_avg:65.54ms +[2025-07-05 09:31:35] [Rank 0] step:361/10000 train_time:23736ms step_avg:65.75ms +[2025-07-05 09:31:35] [Rank 0] step:361/10000 train_time:23736ms step_avg:65.75ms +[2025-07-05 09:31:36] [Rank 0] step:381/10000 train_time:25095ms step_avg:65.87ms +[2025-07-05 09:31:36] [Rank 0] step:381/10000 train_time:25095ms step_avg:65.87ms +[2025-07-05 09:31:37] [Rank 0] step:401/10000 train_time:26436ms step_avg:65.93ms +[2025-07-05 09:31:37] [Rank 0] step:401/10000 train_time:26436ms step_avg:65.93ms +[2025-07-05 09:31:39] [Rank 0] step:421/10000 train_time:27778ms step_avg:65.98ms 
+[2025-07-05 09:31:39] [Rank 0] step:421/10000 train_time:27778ms step_avg:65.98ms +[2025-07-05 09:31:40] [Rank 0] step:441/10000 train_time:29120ms step_avg:66.03ms +[2025-07-05 09:31:40] [Rank 0] step:441/10000 train_time:29120ms step_avg:66.03ms +[2025-07-05 09:31:42] [Rank 0] step:461/10000 train_time:30464ms step_avg:66.08ms +[2025-07-05 09:31:42] [Rank 0] step:461/10000 train_time:30464ms step_avg:66.08ms +[2025-07-05 09:31:43] [Rank 0] step:481/10000 train_time:31807ms step_avg:66.13ms +[2025-07-05 09:31:43] [Rank 0] step:481/10000 train_time:31807ms step_avg:66.13ms +[2025-07-05 09:31:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:31:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:31:45] [Rank 0] PRINT: step:500/10000 train_loss:7.1010 val_loss:5.1146 train_time:33761ms step_avg:67.52ms +[2025-07-05 09:31:45] [Rank 0] PRINT: step:500/10000 train_loss:7.1010 val_loss:5.1146 train_time:33761ms step_avg:67.52ms +[2025-07-05 09:31:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:31:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7554d2e580ec795a06b7ea27c7c03669a7b0d8e9 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "8bd47ec6-e667-404e-b67e-89137a1be516", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50/training_log_8bd47ec6-e667-404e-b67e-89137a1be516.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50/training_log_8bd47ec6-e667-404e-b67e-89137a1be516.txt new file mode 100644 index 0000000000000000000000000000000000000000..cdb3a33392c8deb61ac02be28abe2a561ad40046 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50/training_log_8bd47ec6-e667-404e-b67e-89137a1be516.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:57:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:57:18 2025 --- +[2025-07-05 09:57:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:57:18 2025 --- +[2025-07-05 09:57:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:57:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 09:57:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:57:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:57:18] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 
09:57:18] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:57:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50 +[2025-07-05 09:57:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50 +[2025-07-05 09:57:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
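+# Reference values for the get_lr schedule defined above, assuming the configured
+# num_iterations=10000 and cooldown_frac=0.8 (illustrative only):
+#   get_lr(0)     -> 1.0   # constant phase, first 20% of steps
+#   get_lr(2000)  -> 1.0   # x = 0.2, cooldown begins
+#   get_lr(6000)  -> 0.55  # x = 0.6, halfway through the cooldown
+#   get_lr(10000) -> 0.1   # final multiplier
+# The training loop below sets each optimizer group's lr to initial_lr * get_lr(step).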
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:57:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
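+# Note: run_dir_path_str above is only a fallback default (an absolute path under
+# MUON_theory/logs_bios). On the master process it is rebuilt below relative to
+# base_log_dir; for this run it resolves to
+# logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_50 (as printed at the top of
+# this log). Non-master ranks keep the default and do not write logs or configs.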
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
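+        # Hypothetical example of this split: a cleaned item such as
+        # "What city was Alice born in? Paris" would give
+        # prompt = "What city was Alice born in?" and answer = "Paris";
+        # the first token of " Paris" then becomes the expected token below.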
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
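# Note: train_loss_sum (above) and train_step_count (below) accumulate the training loss
# (already scaled by 1/train_seq_len at accumulation time) and the number of optimizer steps
# taken since the last validation pass; both are reset to fresh zero tensors inside the
# validation block further down, so the train_loss printed at each validation step is an
# average over the preceding interval.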
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:57:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:57:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:57:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:57:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:57:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:57:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:57:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:57:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:57:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:57:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:57:21] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:57:21] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:57:21] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:57:21] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:57:21] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:57:21] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:57:21] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:57:21] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:57:22] [Rank 0] PRINT: Model returns: +[2025-07-05 09:57:22] [Rank 0] PRINT: Model returns: +[2025-07-05 09:57:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:57:22] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:57:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:57:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:57:22] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:57:22] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 09:57:22] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:57:22] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:57:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:57:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:57:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:57:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:57:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:57:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:57:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:57:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:58:24] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:58:24] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:58:24] [Rank 0] PRINT: Starting training... +[2025-07-05 09:58:24] [Rank 0] PRINT: Starting training... +[2025-07-05 09:58:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:58:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
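The divisibility warning above follows directly from the configured sizes: each validation pass consumes world_size * val_seq_len tokens, and 1966080 is not a multiple of 262144, so the final partial batch of validation tokens is never evaluated. A minimal sketch of that arithmetic (val_seq_len = 65536 comes from the hyperparameters; world_size = 4 is inferred from the logged batch size, not read directly from the log):

val_seq_len = 65536                              # 4 * 16 * 1024, from Hyperparameters
world_size = 4                                   # inferred: 262144 // 65536
val_tokens = 1966080

val_batch_size = world_size * val_seq_len        # 262144 tokens per validation step
val_num_steps = val_tokens // val_batch_size     # 7 full steps (integer division)
skipped = val_tokens - val_num_steps * val_batch_size
print(val_batch_size, val_num_steps, skipped)    # 262144 7 131072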
+[2025-07-05 09:58:31] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:58:31] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:58:33] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.60ms +[2025-07-05 09:58:33] [Rank 0] step:21/10000 train_time:1021ms step_avg:48.60ms +[2025-07-05 09:58:35] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.21ms +[2025-07-05 09:58:35] [Rank 0] step:41/10000 train_time:2346ms step_avg:57.21ms +[2025-07-05 09:58:36] [Rank 0] step:61/10000 train_time:3672ms step_avg:60.20ms +[2025-07-05 09:58:36] [Rank 0] step:61/10000 train_time:3672ms step_avg:60.20ms +[2025-07-05 09:58:37] [Rank 0] step:81/10000 train_time:4999ms step_avg:61.71ms +[2025-07-05 09:58:37] [Rank 0] step:81/10000 train_time:4999ms step_avg:61.71ms +[2025-07-05 09:58:38] [Rank 0] step:101/10000 train_time:6325ms step_avg:62.62ms +[2025-07-05 09:58:38] [Rank 0] step:101/10000 train_time:6325ms step_avg:62.62ms +[2025-07-05 09:58:40] [Rank 0] step:121/10000 train_time:7652ms step_avg:63.24ms +[2025-07-05 09:58:40] [Rank 0] step:121/10000 train_time:7652ms step_avg:63.24ms +[2025-07-05 09:58:41] [Rank 0] step:141/10000 train_time:8978ms step_avg:63.67ms +[2025-07-05 09:58:41] [Rank 0] step:141/10000 train_time:8978ms step_avg:63.67ms +[2025-07-05 09:58:42] [Rank 0] step:161/10000 train_time:10306ms step_avg:64.01ms +[2025-07-05 09:58:42] [Rank 0] step:161/10000 train_time:10306ms step_avg:64.01ms +[2025-07-05 09:58:44] [Rank 0] step:181/10000 train_time:11635ms step_avg:64.28ms +[2025-07-05 09:58:44] [Rank 0] step:181/10000 train_time:11635ms step_avg:64.28ms +[2025-07-05 09:58:45] [Rank 0] step:201/10000 train_time:13030ms step_avg:64.83ms +[2025-07-05 09:58:45] [Rank 0] step:201/10000 train_time:13030ms step_avg:64.83ms +[2025-07-05 09:58:47] [Rank 0] step:221/10000 train_time:14359ms step_avg:64.97ms +[2025-07-05 09:58:47] [Rank 0] step:221/10000 train_time:14359ms step_avg:64.97ms +[2025-07-05 09:58:48] [Rank 0] step:241/10000 train_time:15689ms step_avg:65.10ms +[2025-07-05 09:58:48] [Rank 0] step:241/10000 train_time:15689ms step_avg:65.10ms +[2025-07-05 09:58:49] [Rank 0] step:261/10000 train_time:17023ms step_avg:65.22ms +[2025-07-05 09:58:49] [Rank 0] step:261/10000 train_time:17023ms step_avg:65.22ms +[2025-07-05 09:58:51] [Rank 0] step:281/10000 train_time:18355ms step_avg:65.32ms +[2025-07-05 09:58:51] [Rank 0] step:281/10000 train_time:18355ms step_avg:65.32ms +[2025-07-05 09:58:52] [Rank 0] step:301/10000 train_time:19688ms step_avg:65.41ms +[2025-07-05 09:58:52] [Rank 0] step:301/10000 train_time:19688ms step_avg:65.41ms +[2025-07-05 09:58:53] [Rank 0] step:321/10000 train_time:21022ms step_avg:65.49ms +[2025-07-05 09:58:53] [Rank 0] step:321/10000 train_time:21022ms step_avg:65.49ms +[2025-07-05 09:58:55] [Rank 0] step:341/10000 train_time:22359ms step_avg:65.57ms +[2025-07-05 09:58:55] [Rank 0] step:341/10000 train_time:22359ms step_avg:65.57ms +[2025-07-05 09:58:56] [Rank 0] step:361/10000 train_time:24378ms step_avg:67.53ms +[2025-07-05 09:58:56] [Rank 0] step:361/10000 train_time:24378ms step_avg:67.53ms +[2025-07-05 09:58:57] [Rank 0] step:381/10000 train_time:25099ms step_avg:65.88ms +[2025-07-05 09:58:57] [Rank 0] step:381/10000 train_time:25099ms step_avg:65.88ms +[2025-07-05 09:58:59] [Rank 0] step:401/10000 train_time:26438ms step_avg:65.93ms +[2025-07-05 09:58:59] [Rank 0] step:401/10000 train_time:26438ms step_avg:65.93ms +[2025-07-05 09:59:00] [Rank 0] step:421/10000 train_time:27778ms step_avg:65.98ms 
+[2025-07-05 09:59:00] [Rank 0] step:421/10000 train_time:27778ms step_avg:65.98ms +[2025-07-05 09:59:01] [Rank 0] step:441/10000 train_time:29117ms step_avg:66.03ms +[2025-07-05 09:59:01] [Rank 0] step:441/10000 train_time:29117ms step_avg:66.03ms +[2025-07-05 09:59:03] [Rank 0] step:461/10000 train_time:30458ms step_avg:66.07ms +[2025-07-05 09:59:03] [Rank 0] step:461/10000 train_time:30458ms step_avg:66.07ms +[2025-07-05 09:59:04] [Rank 0] step:481/10000 train_time:31800ms step_avg:66.11ms +[2025-07-05 09:59:04] [Rank 0] step:481/10000 train_time:31800ms step_avg:66.11ms +[2025-07-05 09:59:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:59:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:59:06] [Rank 0] PRINT: step:500/10000 train_loss:7.0198 val_loss:4.8142 train_time:33751ms step_avg:67.50ms +[2025-07-05 09:59:06] [Rank 0] PRINT: step:500/10000 train_loss:7.0198 val_loss:4.8142 train_time:33751ms step_avg:67.50ms +[2025-07-05 09:59:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:59:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7c1b353ec9d1e26dc4d5bdc4a7c5bcb2cc5c8f24 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "9df4b042-1c76-4b61-86ff-182576ff9a87", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51/training_log_9df4b042-1c76-4b61-86ff-182576ff9a87.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51/training_log_9df4b042-1c76-4b61-86ff-182576ff9a87.txt new file mode 100644 index 0000000000000000000000000000000000000000..c66c993b4107aa3977a6859c0da03ae30179fe6a --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51/training_log_9df4b042-1c76-4b61-86ff-182576ff9a87.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:24:47] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:24:47 2025 --- +[2025-07-05 10:24:47] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:24:47 2025 --- +[2025-07-05 10:24:47] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:24:47] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-05 10:24:47] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:24:47] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:24:47] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 
10:24:47] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:24:47] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51 +[2025-07-05 10:24:47] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_51 +[2025-07-05 10:24:47] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, 
non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", 
flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
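+# Running training-loss accumulators (descriptive note): train_loss_sum collects the
+# per-step loss (scaled by 1/train_seq_len) and train_step_count the number of steps;
+# both are averaged across ranks at each validation point and then reset to zero.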
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:24:47] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:24:48] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:24:48] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:24:48] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:24:48] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:24:50] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:24:50] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:24:50] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:24:50] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:24:50] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:24:50] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:24:51] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:24:51] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:24:51] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:24:51] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:24:51] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:24:51] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:24:51] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:24:51] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:24:51] [Rank 0] PRINT: Model returns: +[2025-07-05 10:24:51] [Rank 0] PRINT: Model returns: +[2025-07-05 10:24:51] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:24:51] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:24:51] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:24:51] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:24:51] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 10:24:51] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-05 10:24:51] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:24:51] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:24:51] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:24:51] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:24:51] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:24:51] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:24:51] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:24:51] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:24:51] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:24:51] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:25:56] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:25:56] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:25:56] [Rank 0] PRINT: Starting training... +[2025-07-05 10:25:56] [Rank 0] PRINT: Starting training... +[2025-07-05 10:25:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:25:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:26:03] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:26:03] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:26:05] [Rank 0] step:21/10000 train_time:919ms step_avg:43.74ms +[2025-07-05 10:26:05] [Rank 0] step:21/10000 train_time:919ms step_avg:43.74ms +[2025-07-05 10:26:06] [Rank 0] step:41/10000 train_time:2245ms step_avg:54.75ms +[2025-07-05 10:26:06] [Rank 0] step:41/10000 train_time:2245ms step_avg:54.75ms +[2025-07-05 10:26:07] [Rank 0] step:61/10000 train_time:3571ms step_avg:58.54ms +[2025-07-05 10:26:07] [Rank 0] step:61/10000 train_time:3571ms step_avg:58.54ms +[2025-07-05 10:26:09] [Rank 0] step:81/10000 train_time:4898ms step_avg:60.47ms +[2025-07-05 10:26:09] [Rank 0] step:81/10000 train_time:4898ms step_avg:60.47ms +[2025-07-05 10:26:10] [Rank 0] step:101/10000 train_time:6225ms step_avg:61.64ms +[2025-07-05 10:26:10] [Rank 0] step:101/10000 train_time:6225ms step_avg:61.64ms +[2025-07-05 10:26:11] [Rank 0] step:121/10000 train_time:7553ms step_avg:62.42ms +[2025-07-05 10:26:11] [Rank 0] step:121/10000 train_time:7553ms step_avg:62.42ms +[2025-07-05 10:26:13] [Rank 0] step:141/10000 train_time:8883ms step_avg:63.00ms +[2025-07-05 10:26:13] [Rank 0] step:141/10000 train_time:8883ms step_avg:63.00ms +[2025-07-05 10:26:14] [Rank 0] step:161/10000 train_time:10211ms step_avg:63.42ms +[2025-07-05 10:26:14] [Rank 0] step:161/10000 train_time:10211ms step_avg:63.42ms +[2025-07-05 10:26:15] [Rank 0] step:181/10000 train_time:11539ms step_avg:63.75ms +[2025-07-05 10:26:15] [Rank 0] step:181/10000 train_time:11539ms step_avg:63.75ms +[2025-07-05 10:26:17] [Rank 0] step:201/10000 train_time:12918ms step_avg:64.27ms +[2025-07-05 10:26:17] [Rank 0] step:201/10000 train_time:12918ms step_avg:64.27ms +[2025-07-05 10:26:18] [Rank 0] step:221/10000 train_time:14247ms step_avg:64.47ms +[2025-07-05 10:26:18] [Rank 0] step:221/10000 train_time:14247ms step_avg:64.47ms +[2025-07-05 10:26:19] [Rank 0] step:241/10000 train_time:15578ms step_avg:64.64ms +[2025-07-05 10:26:19] [Rank 0] step:241/10000 train_time:15578ms step_avg:64.64ms +[2025-07-05 10:26:21] [Rank 0] step:261/10000 train_time:16913ms step_avg:64.80ms +[2025-07-05 10:26:21] [Rank 0] step:261/10000 train_time:16913ms step_avg:64.80ms +[2025-07-05 10:26:22] [Rank 0] step:281/10000 train_time:18245ms step_avg:64.93ms +[2025-07-05 10:26:22] [Rank 0] step:281/10000 train_time:18245ms step_avg:64.93ms +[2025-07-05 10:26:23] [Rank 0] step:301/10000 train_time:19576ms step_avg:65.04ms +[2025-07-05 10:26:23] [Rank 0] step:301/10000 train_time:19576ms step_avg:65.04ms +[2025-07-05 10:26:25] [Rank 0] step:321/10000 train_time:20910ms step_avg:65.14ms +[2025-07-05 10:26:25] [Rank 0] step:321/10000 train_time:20910ms step_avg:65.14ms +[2025-07-05 10:26:26] [Rank 0] step:341/10000 train_time:22246ms step_avg:65.24ms +[2025-07-05 10:26:26] [Rank 0] step:341/10000 train_time:22246ms step_avg:65.24ms +[2025-07-05 10:26:27] [Rank 0] step:361/10000 train_time:23631ms step_avg:65.46ms +[2025-07-05 10:26:27] [Rank 0] step:361/10000 train_time:23631ms step_avg:65.46ms +[2025-07-05 10:26:29] [Rank 0] step:381/10000 train_time:24986ms step_avg:65.58ms +[2025-07-05 10:26:29] [Rank 0] step:381/10000 train_time:24986ms step_avg:65.58ms +[2025-07-05 10:26:30] [Rank 0] step:401/10000 train_time:26325ms step_avg:65.65ms +[2025-07-05 10:26:30] [Rank 0] step:401/10000 train_time:26325ms step_avg:65.65ms +[2025-07-05 10:26:31] [Rank 0] step:421/10000 train_time:27665ms step_avg:65.71ms 
+[2025-07-05 10:26:31] [Rank 0] step:421/10000 train_time:27665ms step_avg:65.71ms +[2025-07-05 10:26:33] [Rank 0] step:441/10000 train_time:29005ms step_avg:65.77ms +[2025-07-05 10:26:33] [Rank 0] step:441/10000 train_time:29005ms step_avg:65.77ms +[2025-07-05 10:26:34] [Rank 0] step:461/10000 train_time:30344ms step_avg:65.82ms +[2025-07-05 10:26:34] [Rank 0] step:461/10000 train_time:30344ms step_avg:65.82ms +[2025-07-05 10:26:35] [Rank 0] step:481/10000 train_time:31685ms step_avg:65.87ms +[2025-07-05 10:26:35] [Rank 0] step:481/10000 train_time:31685ms step_avg:65.87ms +[2025-07-05 10:26:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:26:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:26:38] [Rank 0] PRINT: step:500/10000 train_loss:7.0190 val_loss:4.8097 train_time:33703ms step_avg:67.41ms +[2025-07-05 10:26:38] [Rank 0] PRINT: step:500/10000 train_loss:7.0190 val_loss:4.8097 train_time:33703ms step_avg:67.41ms +[2025-07-05 10:26:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:26:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..33056f31a17c64919f6fa32b89d6d5de61afd4a9 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "83c5910c-2af4-4b45-baa7-de19762ac6f8", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/training_log_83c5910c-2af4-4b45-baa7-de19762ac6f8.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/training_log_83c5910c-2af4-4b45-baa7-de19762ac6f8.txt new file mode 100644 index 0000000000000000000000000000000000000000..140784806c0a193af0c4af6b912a158d3ed44974 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/training_log_83c5910c-2af4-4b45-baa7-de19762ac6f8.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:24:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:24:18 2025 --- +[2025-07-05 08:24:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:24:18 2025 --- +[2025-07-05 08:24:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:24:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:24:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:24:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:24:18] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:24:18] 
[Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:24:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42 +[2025-07-05 08:24:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42 +[2025-07-05 08:24:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
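+# Bookkeeping for the running training loss: train_loss_sum (above) and
+# train_step_count (below) accumulate the scaled per-step loss and the number of
+# optimizer steps taken since the last validation pass. They are kept as CUDA
+# tensors so the averaged loss can be all-reduced across ranks, and both are reset
+# to zero inside the validation block, so each reported train_loss covers only the
+# most recent val_loss_every-step interval.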
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:24:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
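+# Fallback defaults: every rank gets a defined run_dir_path here, but only the
+# master process actually writes to it. The `if master_process:` block below
+# rebuilds run_dir_path from base_log_dir and the mode/param/lr/seed settings,
+# creates the folder, and saves config.json there, so the relative
+# logs_bios/qa_0704/... path is the one that ends up holding the logs and plots.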
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
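+# Minimal sketch, for reference only (with assumptions): the two running sums
+# initialized here are meant to be reported as `train_loss` at each validation
+# step, assuming the compiled forward pass returns a loss summed over the local
+# sequence (hence the per-step division by args.train_seq_len in the training
+# loop below); the real loop also all-reduces the average across ranks.
+def _sketch_report_train_loss(loss_sum: Tensor, step_count: Tensor) -> float:
+    # Average of the per-token losses accumulated since the last validation step.
+    if step_count.item() > 0:
+        return (loss_sum / step_count).item()
+    return float("nan")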
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:24:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:24:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:24:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:24:19] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:24:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:24:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:24:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:24:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:24:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:24:21] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:24:21] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:24:21] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:24:21] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:24:21] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:24:21] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:24:21] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:24:21] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:24:21] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:24:21] [Rank 0] PRINT: Model returns: +[2025-07-05 08:24:21] [Rank 0] PRINT: Model returns: +[2025-07-05 08:24:21] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:24:21] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:24:21] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:24:21] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:24:21] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 08:24:21] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 08:24:22] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:24:22] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:24:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:24:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:24:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:24:22] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:24:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:24:22] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:24:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:24:22] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:25:24] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:25:24] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:25:24] [Rank 0] PRINT: Starting training... +[2025-07-05 08:25:24] [Rank 0] PRINT: Starting training... +[2025-07-05 08:25:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:25:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
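+A quick arithmetic check of the divisibility warning above (a sketch; it assumes
+val_batch_size = world_size * val_seq_len with a world size of 4, i.e. 4 * 65536 = 262144):
+    val_tokens, val_batch_size = 1966080, 262144
+    val_num_steps = val_tokens // val_batch_size                  # 7 full validation batches
+    tokens_missed = val_tokens - val_num_steps * val_batch_size   # 131072 tokens skipped per eval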
+[2025-07-05 08:25:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:25:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:25:34] [Rank 0] step:21/10000 train_time:815ms step_avg:38.79ms +[2025-07-05 08:25:34] [Rank 0] step:21/10000 train_time:815ms step_avg:38.79ms +[2025-07-05 08:25:35] [Rank 0] step:41/10000 train_time:2147ms step_avg:52.36ms +[2025-07-05 08:25:35] [Rank 0] step:41/10000 train_time:2147ms step_avg:52.36ms +[2025-07-05 08:25:36] [Rank 0] step:61/10000 train_time:3452ms step_avg:56.59ms +[2025-07-05 08:25:36] [Rank 0] step:61/10000 train_time:3452ms step_avg:56.59ms +[2025-07-05 08:25:38] [Rank 0] step:81/10000 train_time:4772ms step_avg:58.91ms +[2025-07-05 08:25:38] [Rank 0] step:81/10000 train_time:4772ms step_avg:58.91ms +[2025-07-05 08:25:39] [Rank 0] step:101/10000 train_time:6093ms step_avg:60.32ms +[2025-07-05 08:25:39] [Rank 0] step:101/10000 train_time:6093ms step_avg:60.32ms +[2025-07-05 08:25:40] [Rank 0] step:121/10000 train_time:7413ms step_avg:61.26ms +[2025-07-05 08:25:40] [Rank 0] step:121/10000 train_time:7413ms step_avg:61.26ms +[2025-07-05 08:25:42] [Rank 0] step:141/10000 train_time:8735ms step_avg:61.95ms +[2025-07-05 08:25:42] [Rank 0] step:141/10000 train_time:8735ms step_avg:61.95ms +[2025-07-05 08:25:43] [Rank 0] step:161/10000 train_time:10056ms step_avg:62.46ms +[2025-07-05 08:25:43] [Rank 0] step:161/10000 train_time:10056ms step_avg:62.46ms +[2025-07-05 08:25:44] [Rank 0] step:181/10000 train_time:11428ms step_avg:63.14ms +[2025-07-05 08:25:44] [Rank 0] step:181/10000 train_time:11428ms step_avg:63.14ms +[2025-07-05 08:25:46] [Rank 0] step:201/10000 train_time:12750ms step_avg:63.43ms +[2025-07-05 08:25:46] [Rank 0] step:201/10000 train_time:12750ms step_avg:63.43ms +[2025-07-05 08:25:47] [Rank 0] step:221/10000 train_time:14076ms step_avg:63.69ms +[2025-07-05 08:25:47] [Rank 0] step:221/10000 train_time:14076ms step_avg:63.69ms +[2025-07-05 08:25:48] [Rank 0] step:241/10000 train_time:15405ms step_avg:63.92ms +[2025-07-05 08:25:48] [Rank 0] step:241/10000 train_time:15405ms step_avg:63.92ms +[2025-07-05 08:25:50] [Rank 0] step:261/10000 train_time:16735ms step_avg:64.12ms +[2025-07-05 08:25:50] [Rank 0] step:261/10000 train_time:16735ms step_avg:64.12ms +[2025-07-05 08:25:51] [Rank 0] step:281/10000 train_time:18065ms step_avg:64.29ms +[2025-07-05 08:25:51] [Rank 0] step:281/10000 train_time:18065ms step_avg:64.29ms +[2025-07-05 08:25:52] [Rank 0] step:301/10000 train_time:19395ms step_avg:64.43ms +[2025-07-05 08:25:52] [Rank 0] step:301/10000 train_time:19395ms step_avg:64.43ms +[2025-07-05 08:25:54] [Rank 0] step:321/10000 train_time:20725ms step_avg:64.57ms +[2025-07-05 08:25:54] [Rank 0] step:321/10000 train_time:20725ms step_avg:64.57ms +[2025-07-05 08:25:55] [Rank 0] step:341/10000 train_time:22057ms step_avg:64.68ms +[2025-07-05 08:25:55] [Rank 0] step:341/10000 train_time:22057ms step_avg:64.68ms +[2025-07-05 08:25:56] [Rank 0] step:361/10000 train_time:23436ms step_avg:64.92ms +[2025-07-05 08:25:56] [Rank 0] step:361/10000 train_time:23436ms step_avg:64.92ms +[2025-07-05 08:25:58] [Rank 0] step:381/10000 train_time:24785ms step_avg:65.05ms +[2025-07-05 08:25:58] [Rank 0] step:381/10000 train_time:24785ms step_avg:65.05ms +[2025-07-05 08:25:59] [Rank 0] step:401/10000 train_time:26116ms step_avg:65.13ms +[2025-07-05 08:25:59] [Rank 0] step:401/10000 train_time:26116ms step_avg:65.13ms +[2025-07-05 08:26:00] [Rank 0] step:421/10000 train_time:27448ms step_avg:65.20ms 
+[2025-07-05 08:26:00] [Rank 0] step:421/10000 train_time:27448ms step_avg:65.20ms +[2025-07-05 08:26:02] [Rank 0] step:441/10000 train_time:28780ms step_avg:65.26ms +[2025-07-05 08:26:02] [Rank 0] step:441/10000 train_time:28780ms step_avg:65.26ms +[2025-07-05 08:26:03] [Rank 0] step:461/10000 train_time:30113ms step_avg:65.32ms +[2025-07-05 08:26:03] [Rank 0] step:461/10000 train_time:30113ms step_avg:65.32ms +[2025-07-05 08:26:04] [Rank 0] step:481/10000 train_time:31445ms step_avg:65.37ms +[2025-07-05 08:26:04] [Rank 0] step:481/10000 train_time:31445ms step_avg:65.37ms +[2025-07-05 08:26:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:26:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:26:07] [Rank 0] PRINT: step:500/10000 train_loss:5.2521 val_loss:2.4404 train_time:33384ms step_avg:66.77ms +[2025-07-05 08:26:07] [Rank 0] PRINT: step:500/10000 train_loss:5.2521 val_loss:2.4404 train_time:33384ms step_avg:66.77ms +[2025-07-05 08:26:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:26:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f074e78c4387425ef5f2fe715b803d02a7841866 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "3d8b1c2c-d511-42e6-bbb9-2d3fa295eab3", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43/training_log_3d8b1c2c-d511-42e6-bbb9-2d3fa295eab3.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43/training_log_3d8b1c2c-d511-42e6-bbb9-2d3fa295eab3.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef15c2f52250f9df2f8da1023ae2c4f08caa32ab --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43/training_log_3d8b1c2c-d511-42e6-bbb9-2d3fa295eab3.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:51:29] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:51:29 2025 --- +[2025-07-05 08:51:29] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:51:29 2025 --- +[2025-07-05 08:51:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:51:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:51:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:51:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:51:29] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:51:29] 
[Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:51:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43 +[2025-07-05 08:51:29] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_43 +[2025-07-05 08:51:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
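+    Each item's class_id is mapped to its power-law group via
+    generate_powerlaw_selection_counts(m_val); the return value is a dict of
+    {str(group_id): mean loss} over the (optionally stratified) sample.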
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
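+# The two accumulators defined here (train_loss_sum, train_step_count) hold the
+# running training loss between validation points and are reset right after each
+# validation block below.
+#
+# Illustrative sketch of the get_lr schedule defined above, evaluated with this
+# run's hyperparameters (num_iterations=10000, cooldown_frac=0.8): the multiplier
+# stays flat for the first 20% of steps, then decays linearly to 0.1.
+#     get_lr(0)     -> 1.0
+#     get_lr(1000)  -> 1.0      (x = 0.1 < 1 - cooldown_frac)
+#     get_lr(6000)  -> 0.55     (w = 0.5, so 0.5*1.0 + 0.5*0.1)
+#     get_lr(10000) -> 0.1      (x clamped to 1.0, w = 0)
+# Inside the loop every optimizer group uses group["initial_lr"] * get_lr(step).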
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:51:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
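+# For reference: with the CLI args parsed above (optimizer_mode=0,
+# model_parameterization="qkvo", adam_lr=0.0001, seed=42 for this run), the
+# f-string above resolves to ".../mode_0_param_qkvo_lr_0.0001_seed_42"; the
+# master process rebuilds the same folder name below as run_folder_name.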
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
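+        # Illustrative walk-through (hypothetical QA item, not from the dataset):
+        #   raw text : "What city was Alice born in? Answer: Paris"
+        #   cleaned  : "What city was Alice born in? Paris"
+        #   prompt   : "What city was Alice born in?"   answer: "Paris"
+        #   expected_token = tokenizer.encode(" Paris", add_special_tokens=False)[0]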
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:51:30] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:51:30] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:51:32] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:51:32] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:51:32] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:51:33] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 08:51:33] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:51:33] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:51:33] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:51:33] [Rank 0] PRINT: Model returns: 
+[2025-07-05 08:51:33] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:51:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 08:51:33] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001).
+[2025-07-05 08:51:33] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 08:51:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 08:51:33] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:51:33] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:51:33] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:52:35] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:52:35] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:52:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:52:42] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:52:44] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.57ms
+[2025-07-05 08:52:45] [Rank 0] step:41/10000 train_time:2337ms step_avg:57.00ms
+[2025-07-05 08:52:47] [Rank 0] step:61/10000 train_time:3654ms step_avg:59.91ms
+[2025-07-05 08:52:48] [Rank 0] step:81/10000 train_time:4973ms step_avg:61.39ms
+[2025-07-05 08:52:49] [Rank 0] step:101/10000 train_time:6292ms step_avg:62.29ms
+[2025-07-05 08:52:51] [Rank 0] step:121/10000 train_time:7612ms step_avg:62.91ms
+[2025-07-05 08:52:52] [Rank 0] step:141/10000 train_time:8933ms step_avg:63.36ms
+[2025-07-05 08:52:53] [Rank 0] step:161/10000 train_time:10255ms step_avg:63.69ms
+[2025-07-05 08:52:55] [Rank 0] step:181/10000 train_time:11578ms step_avg:63.97ms
+[2025-07-05 08:52:56] [Rank 0] step:201/10000 train_time:12903ms step_avg:64.19ms
+[2025-07-05 08:52:57] [Rank 0] step:221/10000 train_time:14228ms step_avg:64.38ms
+[2025-07-05 08:52:59] [Rank 0] step:241/10000 train_time:15553ms step_avg:64.54ms
+[2025-07-05 08:53:00] [Rank 0] step:261/10000 train_time:16887ms step_avg:64.70ms
+[2025-07-05 08:53:01] [Rank 0] step:281/10000 train_time:18215ms step_avg:64.82ms
+[2025-07-05 08:53:03] [Rank 0] step:301/10000 train_time:19592ms step_avg:65.09ms
+[2025-07-05 08:53:04] [Rank 0] step:321/10000 train_time:21023ms step_avg:65.49ms
+[2025-07-05 08:53:05] [Rank 0] step:341/10000 train_time:22353ms step_avg:65.55ms
+[2025-07-05 08:53:07] [Rank 0] step:361/10000 train_time:24338ms step_avg:67.42ms
+[2025-07-05 08:53:08] [Rank 0] step:381/10000 train_time:25055ms step_avg:65.76ms
+[2025-07-05 08:53:09] [Rank 0] step:401/10000 train_time:26386ms step_avg:65.80ms
+[2025-07-05 08:53:11] [Rank 0] step:421/10000 train_time:27716ms step_avg:65.83ms
+[2025-07-05 08:53:12] [Rank 0] step:441/10000 train_time:29047ms step_avg:65.87ms
+[2025-07-05 08:53:13] [Rank 0] step:461/10000 train_time:30377ms step_avg:65.89ms
+[2025-07-05 08:53:15] [Rank 0] step:481/10000 train_time:31716ms step_avg:65.94ms
+[2025-07-05 08:53:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:53:17] [Rank 0] PRINT: step:500/10000 train_loss:5.2931 val_loss:2.5039 train_time:33654ms step_avg:67.31ms
+[2025-07-05 08:53:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ab93ea27149cc32338b7adfcacfe370e260f8779
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 44,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "88f74a59-7c73-40a4-bbd6-f21a1ddc7cf7",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44/training_log_88f74a59-7c73-40a4-bbd6-f21a1ddc7cf7.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44/training_log_88f74a59-7c73-40a4-bbd6-f21a1ddc7cf7.txt
new file mode 100644
index 0000000000000000000000000000000000000000..689ad8ad8b3448ec1e62f0cbec0e56066e4d15f8
--- /dev/null
+++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44/training_log_88f74a59-7c73-40a4-bbd6-f21a1ddc7cf7.txt
@@ -0,0 +1,2662 @@
+[2025-07-05 09:19:07] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:19:07 2025 ---
+[2025-07-05 09:19:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001)
+[2025-07-05 09:19:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-05 09:19:07] 
[Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:19:07] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44 +[2025-07-05 09:19:07] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_44 +[2025-07-05 09:19:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
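+# train_loss_sum (above) and train_step_count (below) accumulate the
+# sequence-length-normalized training loss between validation points; both
+# counters are reset inside the validation/logging block further down.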
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:19:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
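+# Note: the absolute run_dir_path above is only a default; on the master
+# process it is rebuilt below from base_log_dir (a path relative to the
+# working directory), and config.json, plots, and checkpoints are written there.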
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:19:07] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:19:07] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:19:09] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:19:09] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:19:09] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:19:10] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 09:19:10] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:19:10] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:19:10] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:19:10] [Rank 0] PRINT: Model returns: 
+[2025-07-05 09:19:10] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:19:10] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 09:19:10] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001).
+[2025-07-05 09:19:10] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 09:19:10] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 09:19:10] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:19:10] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:19:10] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:20:13] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:20:13] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:20:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:20:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 09:20:22] [Rank 0] step:21/10000 train_time:1017ms step_avg:48.43ms
+[2025-07-05 09:20:23] [Rank 0] step:41/10000 train_time:2336ms step_avg:56.97ms
+[2025-07-05 09:20:25] [Rank 0] step:61/10000 train_time:3657ms step_avg:59.95ms
+[2025-07-05 09:20:26] [Rank 0] step:81/10000 train_time:4978ms step_avg:61.46ms
+[2025-07-05 09:20:27] [Rank 0] step:101/10000 train_time:6300ms step_avg:62.38ms
+[2025-07-05 09:20:29] [Rank 0] step:121/10000 train_time:7621ms step_avg:62.98ms
+[2025-07-05 09:20:30] [Rank 0] step:141/10000 train_time:8944ms step_avg:63.43ms
+[2025-07-05 09:20:31] [Rank 0] step:161/10000 train_time:10267ms step_avg:63.77ms
+[2025-07-05 09:20:33] [Rank 0] step:181/10000 train_time:11639ms step_avg:64.30ms
+[2025-07-05 09:20:34] [Rank 0] step:201/10000 train_time:12995ms step_avg:64.65ms
+[2025-07-05 09:20:35] [Rank 0] step:221/10000 train_time:14322ms step_avg:64.81ms
+[2025-07-05 09:20:37] [Rank 0] step:241/10000 train_time:15650ms step_avg:64.94ms
+[2025-07-05 09:20:38] [Rank 0] step:261/10000 train_time:16979ms step_avg:65.05ms
+[2025-07-05 09:20:39] [Rank 0] step:281/10000 train_time:18309ms step_avg:65.16ms
+[2025-07-05 09:20:41] [Rank 0] step:301/10000 train_time:19642ms step_avg:65.25ms
+[2025-07-05 09:20:42] [Rank 0] step:321/10000 train_time:20974ms step_avg:65.34ms
+[2025-07-05 09:20:43] [Rank 0] step:341/10000 train_time:22305ms step_avg:65.41ms
+[2025-07-05 09:20:45] [Rank 0] step:361/10000 train_time:24308ms step_avg:67.34ms
+[2025-07-05 09:20:46] [Rank 0] step:381/10000 train_time:25132ms step_avg:65.96ms
+[2025-07-05 09:20:48] [Rank 0] step:401/10000 train_time:26463ms step_avg:65.99ms
+[2025-07-05 09:20:49] [Rank 0] step:421/10000 train_time:27793ms step_avg:66.02ms +[2025-07-05 09:20:50] [Rank 0] step:441/10000 train_time:29125ms step_avg:66.04ms +[2025-07-05 09:20:50] [Rank 0] step:441/10000 train_time:29125ms step_avg:66.04ms +[2025-07-05 09:20:52] [Rank 0] step:461/10000 train_time:30458ms step_avg:66.07ms +[2025-07-05 09:20:52] [Rank 0] step:461/10000 train_time:30458ms step_avg:66.07ms +[2025-07-05 09:20:53] [Rank 0] step:481/10000 train_time:31791ms step_avg:66.09ms +[2025-07-05 09:20:53] [Rank 0] step:481/10000 train_time:31791ms step_avg:66.09ms +[2025-07-05 09:20:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:20:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:20:55] [Rank 0] PRINT: step:500/10000 train_loss:5.2767 val_loss:2.4893 train_time:33728ms step_avg:67.46ms +[2025-07-05 09:20:55] [Rank 0] PRINT: step:500/10000 train_loss:5.2767 val_loss:2.4893 train_time:33728ms step_avg:67.46ms +[2025-07-05 09:20:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:20:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e6fb71bcb678ef55592f3b999d812c89ed76b791 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "5759c62b-5224-4177-844b-a6cb54e273a3", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/training_log_5759c62b-5224-4177-844b-a6cb54e273a3.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/training_log_5759c62b-5224-4177-844b-a6cb54e273a3.txt new file mode 100644 index 0000000000000000000000000000000000000000..09daa503f08f8337f77484e13332dc3f87114a23 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/training_log_5759c62b-5224-4177-844b-a6cb54e273a3.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:46:09] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:46:09 2025 --- +[2025-07-05 09:46:09] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:46:09 2025 --- +[2025-07-05 09:46:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:46:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:46:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:46:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:46:09] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:46:09] 
[Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:46:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45 +[2025-07-05 09:46:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45 +[2025-07-05 09:46:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:46:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
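+# For reference, the run folder name simply interpolates the CLI arguments; with the argparse
+# defaults above (optimizer_mode=0, model_parameterization="whole", adam_lr=1e-3, seed=42) it
+# would format to:
+#   mode_0_param_whole_lr_0.001_seed_42
+# The master process rebuilds the same folder name below under base_log_dir, so the absolute
+# prefix here mainly serves non-master ranks, which do not write logs or plots in this script.
+# RANK / LOCAL_RANK / WORLD_SIZE are read from the environment, as set by a distributed
+# launcher such as torchrun.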
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
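+# train_loss_sum and train_step_count (next line) accumulate the (1/train_seq_len)-scaled training loss
+# and the number of optimizer steps taken on this rank since the last validation; at each validation
+# block they are averaged, all-reduced across ranks for the reported train_loss, and then reset to zero.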
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:46:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:46:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:46:09] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:46:09] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:46:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:46:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:46:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:46:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:46:11] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:46:11] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:46:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:46:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:46:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:46:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:46:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:46:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:46:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:46:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:46:12] [Rank 0] PRINT: Model returns: +[2025-07-05 09:46:12] [Rank 0] PRINT: Model returns: +[2025-07-05 09:46:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:46:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:46:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:46:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:46:12] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:46:12] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:46:12] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:46:12] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:46:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:46:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:46:12] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:46:12] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:46:12] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:46:12] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:46:12] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:46:12] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:47:15] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:47:15] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:47:15] [Rank 0] PRINT: Starting training... +[2025-07-05 09:47:15] [Rank 0] PRINT: Starting training... +[2025-07-05 09:47:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:47:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
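The divisibility warning above follows directly from the hyperparameters recorded in this run's config.json: val_tokens = 1966080, while each validation pass consumes world_size * val_seq_len tokens. Below is a minimal sketch of that check; the world size of 4 is an assumption inferred from the 262144 figure in the warning (val_seq_len is 65536), not a value stated explicitly in the log.

    val_tokens = 1966080                 # from config.json
    val_seq_len = 65536                  # from config.json
    world_size = 4                       # assumed; 4 * 65536 = 262144 matches the warning
    val_batch_size = world_size * val_seq_len
    val_num_steps = val_tokens // val_batch_size            # 7 full validation steps
    leftover = val_tokens - val_num_steps * val_batch_size  # 131072 tokens left over
    print(val_num_steps, leftover)

Since 1966080 / 262144 = 7.5, the loop runs 7 whole validation steps and the remaining 131072 tokens are never evaluated, hence the "some tokens might be missed" message.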
+[2025-07-05 09:47:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:47:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:47:24] [Rank 0] step:21/10000 train_time:819ms step_avg:39.00ms +[2025-07-05 09:47:24] [Rank 0] step:21/10000 train_time:819ms step_avg:39.00ms +[2025-07-05 09:47:26] [Rank 0] step:41/10000 train_time:2138ms step_avg:52.15ms +[2025-07-05 09:47:26] [Rank 0] step:41/10000 train_time:2138ms step_avg:52.15ms +[2025-07-05 09:47:27] [Rank 0] step:61/10000 train_time:3456ms step_avg:56.66ms +[2025-07-05 09:47:27] [Rank 0] step:61/10000 train_time:3456ms step_avg:56.66ms +[2025-07-05 09:47:28] [Rank 0] step:81/10000 train_time:4776ms step_avg:58.97ms +[2025-07-05 09:47:28] [Rank 0] step:81/10000 train_time:4776ms step_avg:58.97ms +[2025-07-05 09:47:29] [Rank 0] step:101/10000 train_time:6097ms step_avg:60.36ms +[2025-07-05 09:47:29] [Rank 0] step:101/10000 train_time:6097ms step_avg:60.36ms +[2025-07-05 09:47:31] [Rank 0] step:121/10000 train_time:7417ms step_avg:61.30ms +[2025-07-05 09:47:31] [Rank 0] step:121/10000 train_time:7417ms step_avg:61.30ms +[2025-07-05 09:47:32] [Rank 0] step:141/10000 train_time:8739ms step_avg:61.98ms +[2025-07-05 09:47:32] [Rank 0] step:141/10000 train_time:8739ms step_avg:61.98ms +[2025-07-05 09:47:33] [Rank 0] step:161/10000 train_time:10062ms step_avg:62.50ms +[2025-07-05 09:47:33] [Rank 0] step:161/10000 train_time:10062ms step_avg:62.50ms +[2025-07-05 09:47:35] [Rank 0] step:181/10000 train_time:11386ms step_avg:62.90ms +[2025-07-05 09:47:35] [Rank 0] step:181/10000 train_time:11386ms step_avg:62.90ms +[2025-07-05 09:47:36] [Rank 0] step:201/10000 train_time:12782ms step_avg:63.59ms +[2025-07-05 09:47:36] [Rank 0] step:201/10000 train_time:12782ms step_avg:63.59ms +[2025-07-05 09:47:37] [Rank 0] step:221/10000 train_time:14108ms step_avg:63.84ms +[2025-07-05 09:47:37] [Rank 0] step:221/10000 train_time:14108ms step_avg:63.84ms +[2025-07-05 09:47:39] [Rank 0] step:241/10000 train_time:15436ms step_avg:64.05ms +[2025-07-05 09:47:39] [Rank 0] step:241/10000 train_time:15436ms step_avg:64.05ms +[2025-07-05 09:47:40] [Rank 0] step:261/10000 train_time:16767ms step_avg:64.24ms +[2025-07-05 09:47:40] [Rank 0] step:261/10000 train_time:16767ms step_avg:64.24ms +[2025-07-05 09:47:41] [Rank 0] step:281/10000 train_time:18097ms step_avg:64.40ms +[2025-07-05 09:47:41] [Rank 0] step:281/10000 train_time:18097ms step_avg:64.40ms +[2025-07-05 09:47:43] [Rank 0] step:301/10000 train_time:19426ms step_avg:64.54ms +[2025-07-05 09:47:43] [Rank 0] step:301/10000 train_time:19426ms step_avg:64.54ms +[2025-07-05 09:47:44] [Rank 0] step:321/10000 train_time:20756ms step_avg:64.66ms +[2025-07-05 09:47:44] [Rank 0] step:321/10000 train_time:20756ms step_avg:64.66ms +[2025-07-05 09:47:45] [Rank 0] step:341/10000 train_time:22088ms step_avg:64.77ms +[2025-07-05 09:47:45] [Rank 0] step:341/10000 train_time:22088ms step_avg:64.77ms +[2025-07-05 09:47:47] [Rank 0] step:361/10000 train_time:23521ms step_avg:65.16ms +[2025-07-05 09:47:47] [Rank 0] step:361/10000 train_time:23521ms step_avg:65.16ms +[2025-07-05 09:47:48] [Rank 0] step:381/10000 train_time:24921ms step_avg:65.41ms +[2025-07-05 09:47:48] [Rank 0] step:381/10000 train_time:24921ms step_avg:65.41ms +[2025-07-05 09:47:50] [Rank 0] step:401/10000 train_time:26252ms step_avg:65.47ms +[2025-07-05 09:47:50] [Rank 0] step:401/10000 train_time:26252ms step_avg:65.47ms +[2025-07-05 09:47:51] [Rank 0] step:421/10000 train_time:27672ms step_avg:65.73ms 
+[2025-07-05 09:47:51] [Rank 0] step:421/10000 train_time:27672ms step_avg:65.73ms +[2025-07-05 09:47:52] [Rank 0] step:441/10000 train_time:29003ms step_avg:65.77ms +[2025-07-05 09:47:52] [Rank 0] step:441/10000 train_time:29003ms step_avg:65.77ms +[2025-07-05 09:47:54] [Rank 0] step:461/10000 train_time:30334ms step_avg:65.80ms +[2025-07-05 09:47:54] [Rank 0] step:461/10000 train_time:30334ms step_avg:65.80ms +[2025-07-05 09:47:55] [Rank 0] step:481/10000 train_time:31666ms step_avg:65.83ms +[2025-07-05 09:47:55] [Rank 0] step:481/10000 train_time:31666ms step_avg:65.83ms +[2025-07-05 09:47:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:47:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:47:57] [Rank 0] PRINT: step:500/10000 train_loss:5.2692 val_loss:2.4710 train_time:33605ms step_avg:67.21ms +[2025-07-05 09:47:57] [Rank 0] PRINT: step:500/10000 train_loss:5.2692 val_loss:2.4710 train_time:33605ms step_avg:67.21ms +[2025-07-05 09:47:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:47:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..eedd44d8ef7c54483c07e0f73748130c79362d03 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "3902565a-e7b0-45d3-a506-710dc475f6d0", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46/training_log_3902565a-e7b0-45d3-a506-710dc475f6d0.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46/training_log_3902565a-e7b0-45d3-a506-710dc475f6d0.txt new file mode 100644 index 0000000000000000000000000000000000000000..52d8e778e156b0f0a586968e3dac5ae8209d43ae --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46/training_log_3902565a-e7b0-45d3-a506-710dc475f6d0.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:13:24] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:13:24 2025 --- +[2025-07-05 10:13:24] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:13:24 2025 --- +[2025-07-05 10:13:24] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 10:13:24] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 10:13:24] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:13:24] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:13:24] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:13:24] 
[Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:13:24] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46 +[2025-07-05 10:13:24] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_46 +[2025-07-05 10:13:24] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
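+# The schedules defined above are consumed inside the loop below: with cooldown_frac=0.8 and
+# num_iterations=10000, get_lr() holds the LR multiplier at 1.0 for the first 2000 steps and then
+# decays it linearly to 0.1 by the final step, while get_window_size_blocks() widens the attention
+# window from 128 tokens in 128-token increments as training progresses (reaching ~1728 at the end).
+# train_loss_sum / train_step_count accumulate loss_train.detach() / args.train_seq_len and the
+# number of steps since the last validation pass; their ratio is averaged across ranks for the
+# reported train_loss, and both are reset to zero after every validation/evaluation block.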
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:13:24] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
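+# Per-run output directory: logs_bios/qa_0704/mode_<optimizer_mode>_param_<model_parameterization>_lr_<adam_lr>_seed_<seed>/
+# The master process creates it below and writes config.json (CLI args + hyperparameters) plus
+# training_log_<uuid>.txt into it.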
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
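+        # Hypothetical example: a raw item "Where was Ada born? Answer: London" cleans to
+        # "Where was Ada born? London"; prompt becomes "Where was Ada born?", answer "London",
+        # and expected_token is the first GPT-2 token id of " London" (leading space included).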
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
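# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the logged script): the
# stable-then-decay multiplier implemented by get_lr() above stays at 1.0 for
# the first (1 - cooldown_frac) of training and then decays linearly to 0.1,
# while the attention window grows in 128-token blocks from 128 tokens at
# step 0 up to next_multiple_of_n(1728, n=128) = 1792 tokens at the final step.
def _sketch_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    # Same formula as get_lr(), with the Hyperparameters defaults baked in.
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / cooldown_frac
    return w * 1.0 + (1 - w) * 0.1
# _sketch_lr_multiplier(0) == 1.0, _sketch_lr_multiplier(6000) == 0.55,
# _sketch_lr_multiplier(10000) == 0.1; each optimizer group is later stepped
# with lr = initial_lr * get_lr(step) in the training loop below.
# ---------------------------------------------------------------------------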
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:13:24] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:13:24] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:13:24] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:13:24] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:13:26] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:13:26] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:13:26] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:13:26] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:13:26] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:13:26] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:13:27] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:13:27] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:13:27] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:13:27] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:13:27] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:13:27] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:13:27] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:13:27] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:13:27] [Rank 0] PRINT: Model returns: +[2025-07-05 10:13:27] [Rank 0] PRINT: Model returns: +[2025-07-05 10:13:27] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:13:27] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:13:27] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:13:27] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:13:27] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 10:13:27] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 10:13:27] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:13:27] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:13:27] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:13:27] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:13:27] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:13:27] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:13:27] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:13:27] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:13:27] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:13:27] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:14:31] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:14:31] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:14:31] [Rank 0] PRINT: Starting training... +[2025-07-05 10:14:31] [Rank 0] PRINT: Starting training... +[2025-07-05 10:14:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:14:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
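Worked arithmetic for the divisibility warning above (editor's note, using only values from the log): val_batch_size = 262144 corresponds to world_size 4 × val_seq_len 65536, and 1966080 // 262144 = 7, so each evaluation runs 7 full validation steps and leaves 1966080 − 7 × 262144 = 131072 tokens (half a batch) unevaluated.

val_tokens = 1_966_080
val_batch_size = 4 * 65_536                                    # world_size * val_seq_len = 262144
val_num_steps = val_tokens // val_batch_size                   # 7 full validation steps
skipped_tokens = val_tokens - val_num_steps * val_batch_size   # 131072 tokens skipped per eval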
+[2025-07-05 10:14:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:14:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:14:40] [Rank 0] step:21/10000 train_time:1027ms step_avg:48.91ms +[2025-07-05 10:14:40] [Rank 0] step:21/10000 train_time:1027ms step_avg:48.91ms +[2025-07-05 10:14:41] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.27ms +[2025-07-05 10:14:41] [Rank 0] step:41/10000 train_time:2348ms step_avg:57.27ms +[2025-07-05 10:14:42] [Rank 0] step:61/10000 train_time:3670ms step_avg:60.16ms +[2025-07-05 10:14:42] [Rank 0] step:61/10000 train_time:3670ms step_avg:60.16ms +[2025-07-05 10:14:44] [Rank 0] step:81/10000 train_time:4993ms step_avg:61.65ms +[2025-07-05 10:14:44] [Rank 0] step:81/10000 train_time:4993ms step_avg:61.65ms +[2025-07-05 10:14:45] [Rank 0] step:101/10000 train_time:6316ms step_avg:62.54ms +[2025-07-05 10:14:45] [Rank 0] step:101/10000 train_time:6316ms step_avg:62.54ms +[2025-07-05 10:14:46] [Rank 0] step:121/10000 train_time:7641ms step_avg:63.15ms +[2025-07-05 10:14:46] [Rank 0] step:121/10000 train_time:7641ms step_avg:63.15ms +[2025-07-05 10:14:48] [Rank 0] step:141/10000 train_time:8965ms step_avg:63.58ms +[2025-07-05 10:14:48] [Rank 0] step:141/10000 train_time:8965ms step_avg:63.58ms +[2025-07-05 10:14:49] [Rank 0] step:161/10000 train_time:10292ms step_avg:63.93ms +[2025-07-05 10:14:49] [Rank 0] step:161/10000 train_time:10292ms step_avg:63.93ms +[2025-07-05 10:14:50] [Rank 0] step:181/10000 train_time:11621ms step_avg:64.20ms +[2025-07-05 10:14:50] [Rank 0] step:181/10000 train_time:11621ms step_avg:64.20ms +[2025-07-05 10:14:52] [Rank 0] step:201/10000 train_time:13021ms step_avg:64.78ms +[2025-07-05 10:14:52] [Rank 0] step:201/10000 train_time:13021ms step_avg:64.78ms +[2025-07-05 10:14:53] [Rank 0] step:221/10000 train_time:14351ms step_avg:64.94ms +[2025-07-05 10:14:53] [Rank 0] step:221/10000 train_time:14351ms step_avg:64.94ms +[2025-07-05 10:14:54] [Rank 0] step:241/10000 train_time:15683ms step_avg:65.07ms +[2025-07-05 10:14:54] [Rank 0] step:241/10000 train_time:15683ms step_avg:65.07ms +[2025-07-05 10:14:56] [Rank 0] step:261/10000 train_time:17015ms step_avg:65.19ms +[2025-07-05 10:14:56] [Rank 0] step:261/10000 train_time:17015ms step_avg:65.19ms +[2025-07-05 10:14:57] [Rank 0] step:281/10000 train_time:18349ms step_avg:65.30ms +[2025-07-05 10:14:57] [Rank 0] step:281/10000 train_time:18349ms step_avg:65.30ms +[2025-07-05 10:14:58] [Rank 0] step:301/10000 train_time:19683ms step_avg:65.39ms +[2025-07-05 10:14:58] [Rank 0] step:301/10000 train_time:19683ms step_avg:65.39ms +[2025-07-05 10:15:00] [Rank 0] step:321/10000 train_time:21015ms step_avg:65.47ms +[2025-07-05 10:15:00] [Rank 0] step:321/10000 train_time:21015ms step_avg:65.47ms +[2025-07-05 10:15:01] [Rank 0] step:341/10000 train_time:22348ms step_avg:65.54ms +[2025-07-05 10:15:01] [Rank 0] step:341/10000 train_time:22348ms step_avg:65.54ms +[2025-07-05 10:15:02] [Rank 0] step:361/10000 train_time:23683ms step_avg:65.60ms +[2025-07-05 10:15:02] [Rank 0] step:361/10000 train_time:23683ms step_avg:65.60ms +[2025-07-05 10:15:04] [Rank 0] step:381/10000 train_time:25057ms step_avg:65.77ms +[2025-07-05 10:15:04] [Rank 0] step:381/10000 train_time:25057ms step_avg:65.77ms +[2025-07-05 10:15:05] [Rank 0] step:401/10000 train_time:26392ms step_avg:65.81ms +[2025-07-05 10:15:05] [Rank 0] step:401/10000 train_time:26392ms step_avg:65.81ms +[2025-07-05 10:15:06] [Rank 0] step:421/10000 train_time:27726ms step_avg:65.86ms 
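Editor's note on the timing lines (derived only from the numbers above): step_avg is the cumulative train_time divided by the number of completed steps, so at step 21 the log shows 1027 ms / 21 ≈ 48.91 ms and at step 421 it shows 27726 ms / 421 ≈ 65.86 ms; the running average rises over the first few hundred steps and then levels off near 66 ms per step.

# e.g. 27726 / 421 ≈ 65.86 ms, matching the logged step_avg at step 421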
+[2025-07-05 10:15:06] [Rank 0] step:421/10000 train_time:27726ms step_avg:65.86ms +[2025-07-05 10:15:08] [Rank 0] step:441/10000 train_time:29162ms step_avg:66.13ms +[2025-07-05 10:15:08] [Rank 0] step:441/10000 train_time:29162ms step_avg:66.13ms +[2025-07-05 10:15:09] [Rank 0] step:461/10000 train_time:30496ms step_avg:66.15ms +[2025-07-05 10:15:09] [Rank 0] step:461/10000 train_time:30496ms step_avg:66.15ms +[2025-07-05 10:15:10] [Rank 0] step:481/10000 train_time:31831ms step_avg:66.18ms +[2025-07-05 10:15:10] [Rank 0] step:481/10000 train_time:31831ms step_avg:66.18ms +[2025-07-05 10:15:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:15:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:15:13] [Rank 0] PRINT: step:500/10000 train_loss:5.2668 val_loss:2.4785 train_time:33773ms step_avg:67.55ms +[2025-07-05 10:15:13] [Rank 0] PRINT: step:500/10000 train_loss:5.2668 val_loss:2.4785 train_time:33773ms step_avg:67.55ms +[2025-07-05 10:15:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:15:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3b21ffb125b85762be793ab7a23627e5e8e309a2 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "2a154fd1-0e1a-470b-ae5e-1afc01efab94", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47/training_log_2a154fd1-0e1a-470b-ae5e-1afc01efab94.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47/training_log_2a154fd1-0e1a-470b-ae5e-1afc01efab94.txt new file mode 100644 index 0000000000000000000000000000000000000000..bcd2e25dc416c60c458f0b828648e8879b9deec7 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47/training_log_2a154fd1-0e1a-470b-ae5e-1afc01efab94.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:32:46] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:32:46 2025 --- +[2025-07-05 08:32:46] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:32:46 2025 --- +[2025-07-05 08:32:46] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:32:46] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 08:32:46] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:32:46] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:32:46] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:32:46] 
[Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:32:46] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47 +[2025-07-05 08:32:46] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_47 +[2025-07-05 08:32:46] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
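+# A quick sanity sketch of the schedules defined above (illustrative only, assuming the
+# default Hyperparameters of num_iterations=10000 and cooldown_frac=0.8 used by this run):
+#   step 1000  -> x=0.10 < 0.20, stable phase, LR multiplier 1.0
+#   step 5000  -> x=0.50, w=(1-0.5)/0.8=0.625, multiplier 0.625*1.0 + 0.375*0.1 = 0.6625
+#   step 10000 -> x=1.00, multiplier 0.1 (each group decays to 10% of its initial_lr)
+#   get_window_size_blocks(5000) -> 1728*0.5=864 tokens, rounded up to 896 = 7 blocks of 128
+assert abs(get_lr(1000) - 1.0) < 1e-6
+assert abs(get_lr(5000) - 0.6625) < 1e-6
+assert abs(get_lr(10000) - 0.1) < 1e-6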
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:32:46] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
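+# Compact, illustrative-only summary of the --optimizer_mode splits described in the help
+# text above (applies to the "qkvo" parameterization used in this run; the authoritative
+# dispatch is the if/elif chain in the optimizer setup further down, and embeddings,
+# lm_head and scalar params always stay with Adam regardless of mode):
+_OPTIMIZER_MODE_SUMMARY = {  # hypothetical reference table, not used anywhere in the run
+    0: ("Muon: attn QK+VO, MLP W1+W2", "Adam: embeds/head/scalars only"),
+    1: ("Muon: attn QK",               "Adam: attn VO, MLP W1+W2"),
+    2: ("Muon: attn VO",               "Adam: attn QK, MLP W1+W2"),
+    3: ("Muon: attn QK+VO",            "Adam: MLP W1+W2"),
+    4: ("Muon: MLP W1+W2",             "Adam: attn QK+VO"),
+    5: ("Muon: (none)",                "Adam: attn QK+VO, MLP W1+W2"),
+    6: ("Muon: MLP W2",                "Adam: attn QK+VO, MLP W1"),
+    7: ("Muon: attn VO, MLP W1+W2",    "Adam: attn QK"),
+    8: ("Muon: attn VO, MLP W2",       "Adam: attn QK, MLP W1"),
+}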
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
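# A minimal standalone sketch of the stable-then-decay schedule that get_lr above implements,
# assuming this run's settings (num_iterations=10000, cooldown_frac=0.8): the LR multiplier
# holds at 1.0 for the first 20% of steps, then decays linearly to 0.1 at the final step.
def _lr_schedule_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)  # training progress, clamped to [0, 1]
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1
# e.g. step 1000 -> 1.0, step 2000 -> 1.0, step 6000 -> ~0.55, step 10000 -> 0.1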
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:32:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:32:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:32:46] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:32:46] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:32:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:32:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:32:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:32:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:32:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:32:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:32:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:32:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:32:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:32:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:32:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:32:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:32:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:32:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:32:49] [Rank 0] PRINT: Model returns: +[2025-07-05 08:32:49] [Rank 0] PRINT: Model returns: +[2025-07-05 08:32:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:32:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:32:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:32:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:32:49] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 08:32:49] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 08:32:49] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:32:49] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:32:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:32:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:32:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:32:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:32:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:32:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:32:49] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:32:49] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:33:55] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:33:55] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:33:56] [Rank 0] PRINT: Starting training... +[2025-07-05 08:33:56] [Rank 0] PRINT: Starting training... +[2025-07-05 08:33:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:33:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
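A quick sketch makes the divisibility warning above concrete (all values taken from this log; val_batch_size 262144 = 4 * val_seq_len 65536, implying world_size = 4):

val_tokens, val_batch_size = 1966080, 262144
val_num_steps = val_tokens // val_batch_size   # 7 full validation steps per evaluation
covered = val_num_steps * val_batch_size       # 1835008 tokens actually evaluated
skipped = val_tokens - covered                 # 131072 tokens skipped each validation pass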
+[2025-07-05 08:34:04] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:34:04] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:34:06] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.57ms +[2025-07-05 08:34:06] [Rank 0] step:21/10000 train_time:1020ms step_avg:48.57ms +[2025-07-05 08:34:07] [Rank 0] step:41/10000 train_time:2338ms step_avg:57.02ms +[2025-07-05 08:34:07] [Rank 0] step:41/10000 train_time:2338ms step_avg:57.02ms +[2025-07-05 08:34:08] [Rank 0] step:61/10000 train_time:3657ms step_avg:59.95ms +[2025-07-05 08:34:08] [Rank 0] step:61/10000 train_time:3657ms step_avg:59.95ms +[2025-07-05 08:34:10] [Rank 0] step:81/10000 train_time:4977ms step_avg:61.45ms +[2025-07-05 08:34:10] [Rank 0] step:81/10000 train_time:4977ms step_avg:61.45ms +[2025-07-05 08:34:11] [Rank 0] step:101/10000 train_time:6298ms step_avg:62.36ms +[2025-07-05 08:34:11] [Rank 0] step:101/10000 train_time:6298ms step_avg:62.36ms +[2025-07-05 08:34:12] [Rank 0] step:121/10000 train_time:7618ms step_avg:62.96ms +[2025-07-05 08:34:12] [Rank 0] step:121/10000 train_time:7618ms step_avg:62.96ms +[2025-07-05 08:34:14] [Rank 0] step:141/10000 train_time:8940ms step_avg:63.41ms +[2025-07-05 08:34:14] [Rank 0] step:141/10000 train_time:8940ms step_avg:63.41ms +[2025-07-05 08:34:15] [Rank 0] step:161/10000 train_time:10264ms step_avg:63.75ms +[2025-07-05 08:34:15] [Rank 0] step:161/10000 train_time:10264ms step_avg:63.75ms +[2025-07-05 08:34:16] [Rank 0] step:181/10000 train_time:11641ms step_avg:64.31ms +[2025-07-05 08:34:16] [Rank 0] step:181/10000 train_time:11641ms step_avg:64.31ms +[2025-07-05 08:34:18] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-05 08:34:18] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-05 08:34:19] [Rank 0] step:221/10000 train_time:14291ms step_avg:64.67ms +[2025-07-05 08:34:19] [Rank 0] step:221/10000 train_time:14291ms step_avg:64.67ms +[2025-07-05 08:34:20] [Rank 0] step:241/10000 train_time:15619ms step_avg:64.81ms +[2025-07-05 08:34:20] [Rank 0] step:241/10000 train_time:15619ms step_avg:64.81ms +[2025-07-05 08:34:22] [Rank 0] step:261/10000 train_time:16949ms step_avg:64.94ms +[2025-07-05 08:34:22] [Rank 0] step:261/10000 train_time:16949ms step_avg:64.94ms +[2025-07-05 08:34:23] [Rank 0] step:281/10000 train_time:18281ms step_avg:65.06ms +[2025-07-05 08:34:23] [Rank 0] step:281/10000 train_time:18281ms step_avg:65.06ms +[2025-07-05 08:34:24] [Rank 0] step:301/10000 train_time:19611ms step_avg:65.15ms +[2025-07-05 08:34:24] [Rank 0] step:301/10000 train_time:19611ms step_avg:65.15ms +[2025-07-05 08:34:26] [Rank 0] step:321/10000 train_time:20941ms step_avg:65.24ms +[2025-07-05 08:34:26] [Rank 0] step:321/10000 train_time:20941ms step_avg:65.24ms +[2025-07-05 08:34:27] [Rank 0] step:341/10000 train_time:22273ms step_avg:65.32ms +[2025-07-05 08:34:27] [Rank 0] step:341/10000 train_time:22273ms step_avg:65.32ms +[2025-07-05 08:34:28] [Rank 0] step:361/10000 train_time:23656ms step_avg:65.53ms +[2025-07-05 08:34:28] [Rank 0] step:361/10000 train_time:23656ms step_avg:65.53ms +[2025-07-05 08:34:30] [Rank 0] step:381/10000 train_time:25000ms step_avg:65.62ms +[2025-07-05 08:34:30] [Rank 0] step:381/10000 train_time:25000ms step_avg:65.62ms +[2025-07-05 08:34:31] [Rank 0] step:401/10000 train_time:26331ms step_avg:65.66ms +[2025-07-05 08:34:31] [Rank 0] step:401/10000 train_time:26331ms step_avg:65.66ms +[2025-07-05 08:34:32] [Rank 0] step:421/10000 train_time:27663ms step_avg:65.71ms 
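The step_avg field in the timing lines above is simply cumulative train_time divided by the number of completed steps, e.g.:

print(1020 / 21)    # ~48.57 ms, matching "step:21 ... step_avg:48.57ms"
print(27663 / 421)  # ~65.71 ms, matching "step:421 ... step_avg:65.71ms"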
+[2025-07-05 08:34:32] [Rank 0] step:421/10000 train_time:27663ms step_avg:65.71ms +[2025-07-05 08:34:34] [Rank 0] step:441/10000 train_time:28994ms step_avg:65.75ms +[2025-07-05 08:34:34] [Rank 0] step:441/10000 train_time:28994ms step_avg:65.75ms +[2025-07-05 08:34:35] [Rank 0] step:461/10000 train_time:30325ms step_avg:65.78ms +[2025-07-05 08:34:35] [Rank 0] step:461/10000 train_time:30325ms step_avg:65.78ms +[2025-07-05 08:34:36] [Rank 0] step:481/10000 train_time:31657ms step_avg:65.81ms +[2025-07-05 08:34:36] [Rank 0] step:481/10000 train_time:31657ms step_avg:65.81ms +[2025-07-05 08:34:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:34:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:34:39] [Rank 0] PRINT: step:500/10000 train_loss:5.2721 val_loss:2.4811 train_time:33593ms step_avg:67.19ms +[2025-07-05 08:34:39] [Rank 0] PRINT: step:500/10000 train_loss:5.2721 val_loss:2.4811 train_time:33593ms step_avg:67.19ms +[2025-07-05 08:34:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:34:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..10a59487e8281a0c408cd5e547a5a9fe676301c0 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "168f2e26-dac8-4e9c-bdfc-a05a18ab77ea", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/training_log_168f2e26-dac8-4e9c-bdfc-a05a18ab77ea.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/training_log_168f2e26-dac8-4e9c-bdfc-a05a18ab77ea.txt new file mode 100644 index 0000000000000000000000000000000000000000..bf4062e9d181660a0686af575fb4d2aa40ffa68f --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/training_log_168f2e26-dac8-4e9c-bdfc-a05a18ab77ea.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:00:14] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:00:14 2025 --- +[2025-07-05 09:00:14] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:00:14 2025 --- +[2025-07-05 09:00:14] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:00:14] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:00:14] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:00:14] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:00:14] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 09:00:14] 
[Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 09:00:14] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48 +[2025-07-05 09:00:14] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48 +[2025-07-05 09:00:14] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
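+
+# --- Illustrative sketch (not part of the logged script): how the distributed_data_generator
+# --- defined above slices each global batch, assuming batch_size = world_size * train_seq_len
+# --- as passed in here. Every rank reads a disjoint window of local_batch_size tokens, plus one
+# --- extra token so targets are the inputs shifted by one.
+def _sketch_rank_slice(tokens, pos: int, rank: int, world_size: int, batch_size: int):
+    local_batch_size = batch_size // world_size
+    buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+    return buf[:-1], buf[1:]  # inputs, next-token targets
+# Example: with train_seq_len = 12288, each rank consumes 12288 tokens per step and the shard
+# position `pos` advances by the full world_size * 12288 global batch.
+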
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:00:14] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
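+
+# --- Illustrative sketch (not part of the logged script): the per-run directory name is built
+# --- purely from the CLI args, following the run_dir_path_str pattern above; config.json and the
+# --- training log are written under this folder. Example values below are only for illustration.
+def _sketch_run_folder_name(optimizer_mode: int, parameterization: str, adam_lr: float, seed: int) -> str:
+    return f"mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed}"
+# Example: _sketch_run_folder_name(0, "qkvo", 0.0001, 42) -> "mode_0_param_qkvo_lr_0.0001_seed_42"
+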
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
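+
+# --- Illustrative sketch (not part of the logged script): the Muon momentum warm-up applied to
+# --- the Muon param groups during the first 300 training steps, assuming the same linear ramp
+# --- from 0.85 to 0.95 used in the training loop.
+def _sketch_muon_momentum(step: int, warmup_steps: int = 300) -> float:
+    frac = min(step / warmup_steps, 1.0)
+    return (1 - frac) * 0.85 + frac * 0.95
+# Example values: step 0 -> 0.85, step 150 -> 0.90, any step >= 300 -> 0.95.
+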
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:00:15] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:00:15] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:00:15] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:00:15] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:00:17] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:00:17] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:00:17] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:00:17] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:00:17] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:00:17] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:00:17] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:00:17] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:00:17] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:00:17] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:00:17] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:00:17] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:00:17] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:00:17] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:00:17] [Rank 0] PRINT: Model returns: +[2025-07-05 09:00:17] [Rank 0] PRINT: Model returns: +[2025-07-05 09:00:17] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:00:17] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:00:17] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:00:17] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:00:17] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:00:17] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:00:17] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:00:17] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:00:17] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:00:17] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:00:17] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:00:17] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:00:17] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:00:17] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:00:17] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:00:17] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:01:22] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:01:22] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:01:23] [Rank 0] PRINT: Starting training... +[2025-07-05 09:01:23] [Rank 0] PRINT: Starting training... +[2025-07-05 09:01:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:01:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:01:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:01:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:01:31] [Rank 0] step:21/10000 train_time:1018ms step_avg:48.48ms +[2025-07-05 09:01:31] [Rank 0] step:21/10000 train_time:1018ms step_avg:48.48ms +[2025-07-05 09:01:33] [Rank 0] step:41/10000 train_time:2335ms step_avg:56.95ms +[2025-07-05 09:01:33] [Rank 0] step:41/10000 train_time:2335ms step_avg:56.95ms +[2025-07-05 09:01:34] [Rank 0] step:61/10000 train_time:3652ms step_avg:59.87ms +[2025-07-05 09:01:34] [Rank 0] step:61/10000 train_time:3652ms step_avg:59.87ms +[2025-07-05 09:01:35] [Rank 0] step:81/10000 train_time:4970ms step_avg:61.36ms +[2025-07-05 09:01:35] [Rank 0] step:81/10000 train_time:4970ms step_avg:61.36ms +[2025-07-05 09:01:37] [Rank 0] step:101/10000 train_time:6289ms step_avg:62.26ms +[2025-07-05 09:01:37] [Rank 0] step:101/10000 train_time:6289ms step_avg:62.26ms +[2025-07-05 09:01:38] [Rank 0] step:121/10000 train_time:7606ms step_avg:62.86ms +[2025-07-05 09:01:38] [Rank 0] step:121/10000 train_time:7606ms step_avg:62.86ms +[2025-07-05 09:01:39] [Rank 0] step:141/10000 train_time:8926ms step_avg:63.30ms +[2025-07-05 09:01:39] [Rank 0] step:141/10000 train_time:8926ms step_avg:63.30ms +[2025-07-05 09:01:41] [Rank 0] step:161/10000 train_time:10246ms step_avg:63.64ms +[2025-07-05 09:01:41] [Rank 0] step:161/10000 train_time:10246ms step_avg:63.64ms +[2025-07-05 09:01:42] [Rank 0] step:181/10000 train_time:11617ms step_avg:64.18ms +[2025-07-05 09:01:42] [Rank 0] step:181/10000 train_time:11617ms step_avg:64.18ms +[2025-07-05 09:01:43] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-05 09:01:43] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-05 09:01:45] [Rank 0] step:221/10000 train_time:14291ms step_avg:64.67ms +[2025-07-05 09:01:45] [Rank 0] step:221/10000 train_time:14291ms step_avg:64.67ms +[2025-07-05 09:01:46] [Rank 0] step:241/10000 train_time:15619ms step_avg:64.81ms +[2025-07-05 09:01:46] [Rank 0] step:241/10000 train_time:15619ms step_avg:64.81ms +[2025-07-05 09:01:47] [Rank 0] step:261/10000 train_time:16948ms step_avg:64.93ms +[2025-07-05 09:01:47] [Rank 0] step:261/10000 train_time:16948ms step_avg:64.93ms +[2025-07-05 09:01:49] [Rank 0] step:281/10000 train_time:18277ms step_avg:65.04ms +[2025-07-05 09:01:49] [Rank 0] step:281/10000 train_time:18277ms step_avg:65.04ms +[2025-07-05 09:01:50] [Rank 0] step:301/10000 train_time:19607ms step_avg:65.14ms +[2025-07-05 09:01:50] [Rank 0] step:301/10000 train_time:19607ms step_avg:65.14ms +[2025-07-05 09:01:51] [Rank 0] step:321/10000 train_time:20937ms step_avg:65.22ms +[2025-07-05 09:01:51] [Rank 0] step:321/10000 train_time:20937ms step_avg:65.22ms +[2025-07-05 09:01:53] [Rank 0] step:341/10000 train_time:22267ms step_avg:65.30ms +[2025-07-05 09:01:53] [Rank 0] step:341/10000 train_time:22267ms step_avg:65.30ms +[2025-07-05 09:01:54] [Rank 0] step:361/10000 train_time:23850ms step_avg:66.07ms +[2025-07-05 09:01:54] [Rank 0] step:361/10000 train_time:23850ms step_avg:66.07ms +[2025-07-05 09:01:55] [Rank 0] step:381/10000 train_time:24999ms step_avg:65.61ms +[2025-07-05 09:01:55] [Rank 0] step:381/10000 train_time:24999ms step_avg:65.61ms +[2025-07-05 09:01:57] [Rank 0] step:401/10000 train_time:26331ms step_avg:65.66ms +[2025-07-05 09:01:57] [Rank 0] step:401/10000 train_time:26331ms step_avg:65.66ms +[2025-07-05 09:01:58] [Rank 0] step:421/10000 train_time:27661ms step_avg:65.70ms 
+[2025-07-05 09:01:58] [Rank 0] step:421/10000 train_time:27661ms step_avg:65.70ms +[2025-07-05 09:01:59] [Rank 0] step:441/10000 train_time:28993ms step_avg:65.74ms +[2025-07-05 09:01:59] [Rank 0] step:441/10000 train_time:28993ms step_avg:65.74ms +[2025-07-05 09:02:01] [Rank 0] step:461/10000 train_time:30325ms step_avg:65.78ms +[2025-07-05 09:02:01] [Rank 0] step:461/10000 train_time:30325ms step_avg:65.78ms +[2025-07-05 09:02:02] [Rank 0] step:481/10000 train_time:31658ms step_avg:65.82ms +[2025-07-05 09:02:02] [Rank 0] step:481/10000 train_time:31658ms step_avg:65.82ms +[2025-07-05 09:02:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:02:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:02:04] [Rank 0] PRINT: step:500/10000 train_loss:5.3062 val_loss:2.5178 train_time:33595ms step_avg:67.19ms +[2025-07-05 09:02:04] [Rank 0] PRINT: step:500/10000 train_loss:5.3062 val_loss:2.5178 train_time:33595ms step_avg:67.19ms +[2025-07-05 09:02:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:02:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..68878ce2b062384ad80927ea7f619bee10a4c8b5 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "a72b950e-9654-4bf1-8d04-be35fdb8ace7", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49/training_log_a72b950e-9654-4bf1-8d04-be35fdb8ace7.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49/training_log_a72b950e-9654-4bf1-8d04-be35fdb8ace7.txt new file mode 100644 index 0000000000000000000000000000000000000000..351d9ceb5fce37e5355eb6b844a49bbb6f3442cd --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49/training_log_a72b950e-9654-4bf1-8d04-be35fdb8ace7.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:27:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:27:43 2025 --- +[2025-07-05 09:27:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:27:43 2025 --- +[2025-07-05 09:27:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:27:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:27:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:27:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:27:43] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:27:43] 
[Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:27:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49 +[2025-07-05 09:27:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_49 +[2025-07-05 09:27:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
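For reference, a minimal standalone sketch (not part of the logged script) of the two schedules applied in the training loop above, restated from the logged get_lr and Muon momentum-warmup code and assuming this run's logged hyperparameters (num_iterations=10000, cooldown_frac=0.8, 300-step momentum warmup):

def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    # constant LR for the first (1 - cooldown_frac) of training, then linear decay down to 0.1x
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

def muon_momentum(step: int) -> float:
    # Muon momentum ramps linearly from 0.85 to 0.95 over the first 300 steps
    frac = min(step / 300, 1)
    return (1 - frac) * 0.85 + frac * 0.95

# endpoints: step 0 -> (lr x1.0, momentum 0.85); final step -> (lr x0.1, momentum 0.95)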
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:27:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:27:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:27:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:27:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:27:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:27:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:27:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:27:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:27:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:27:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:27:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:27:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:27:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:27:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:27:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:27:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:27:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:27:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:27:46] [Rank 0] PRINT: Model returns: +[2025-07-05 09:27:46] [Rank 0] PRINT: Model returns: +[2025-07-05 09:27:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:27:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:27:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:27:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:27:46] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:27:46] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:27:46] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:27:46] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:27:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:27:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:27:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:27:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:27:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:27:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:27:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:27:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:28:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:28:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:28:50] [Rank 0] PRINT: Starting training... +[2025-07-05 09:28:50] [Rank 0] PRINT: Starting training... +[2025-07-05 09:28:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:28:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
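For concreteness, the warning above follows from the integer division in the validation loop: val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144 (a 4-rank world size is what these logged numbers imply), and val_num_steps = 1966080 // 262144 = 7, so each validation pass consumes 7 * 262144 = 1835008 tokens and the remaining 131072 tokens are skipped. A small check of that arithmetic:

val_tokens, val_seq_len, world_size = 1966080, 65536, 4
val_batch_size = world_size * val_seq_len             # 262144
val_num_steps = val_tokens // val_batch_size           # 7 (1966080 / 262144 = 7.5, truncated)
print(val_tokens - val_num_steps * val_batch_size)     # 131072 tokens missed per validation pass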
+[2025-07-05 09:28:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:28:57] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:28:58] [Rank 0] step:21/10000 train_time:921ms step_avg:43.87ms +[2025-07-05 09:28:58] [Rank 0] step:21/10000 train_time:921ms step_avg:43.87ms +[2025-07-05 09:28:59] [Rank 0] step:41/10000 train_time:2240ms step_avg:54.62ms +[2025-07-05 09:28:59] [Rank 0] step:41/10000 train_time:2240ms step_avg:54.62ms +[2025-07-05 09:29:01] [Rank 0] step:61/10000 train_time:3558ms step_avg:58.33ms +[2025-07-05 09:29:01] [Rank 0] step:61/10000 train_time:3558ms step_avg:58.33ms +[2025-07-05 09:29:02] [Rank 0] step:81/10000 train_time:4878ms step_avg:60.22ms +[2025-07-05 09:29:02] [Rank 0] step:81/10000 train_time:4878ms step_avg:60.22ms +[2025-07-05 09:29:03] [Rank 0] step:101/10000 train_time:6198ms step_avg:61.36ms +[2025-07-05 09:29:03] [Rank 0] step:101/10000 train_time:6198ms step_avg:61.36ms +[2025-07-05 09:29:05] [Rank 0] step:121/10000 train_time:7518ms step_avg:62.13ms +[2025-07-05 09:29:05] [Rank 0] step:121/10000 train_time:7518ms step_avg:62.13ms +[2025-07-05 09:29:06] [Rank 0] step:141/10000 train_time:8840ms step_avg:62.70ms +[2025-07-05 09:29:06] [Rank 0] step:141/10000 train_time:8840ms step_avg:62.70ms +[2025-07-05 09:29:07] [Rank 0] step:161/10000 train_time:10164ms step_avg:63.13ms +[2025-07-05 09:29:07] [Rank 0] step:161/10000 train_time:10164ms step_avg:63.13ms +[2025-07-05 09:29:09] [Rank 0] step:181/10000 train_time:11488ms step_avg:63.47ms +[2025-07-05 09:29:09] [Rank 0] step:181/10000 train_time:11488ms step_avg:63.47ms +[2025-07-05 09:29:10] [Rank 0] step:201/10000 train_time:12815ms step_avg:63.76ms +[2025-07-05 09:29:10] [Rank 0] step:201/10000 train_time:12815ms step_avg:63.76ms +[2025-07-05 09:29:11] [Rank 0] step:221/10000 train_time:14141ms step_avg:63.99ms +[2025-07-05 09:29:11] [Rank 0] step:221/10000 train_time:14141ms step_avg:63.99ms +[2025-07-05 09:29:13] [Rank 0] step:241/10000 train_time:15468ms step_avg:64.18ms +[2025-07-05 09:29:13] [Rank 0] step:241/10000 train_time:15468ms step_avg:64.18ms +[2025-07-05 09:29:14] [Rank 0] step:261/10000 train_time:16798ms step_avg:64.36ms +[2025-07-05 09:29:14] [Rank 0] step:261/10000 train_time:16798ms step_avg:64.36ms +[2025-07-05 09:29:15] [Rank 0] step:281/10000 train_time:18128ms step_avg:64.51ms +[2025-07-05 09:29:15] [Rank 0] step:281/10000 train_time:18128ms step_avg:64.51ms +[2025-07-05 09:29:17] [Rank 0] step:301/10000 train_time:19458ms step_avg:64.64ms +[2025-07-05 09:29:17] [Rank 0] step:301/10000 train_time:19458ms step_avg:64.64ms +[2025-07-05 09:29:18] [Rank 0] step:321/10000 train_time:20787ms step_avg:64.76ms +[2025-07-05 09:29:18] [Rank 0] step:321/10000 train_time:20787ms step_avg:64.76ms +[2025-07-05 09:29:19] [Rank 0] step:341/10000 train_time:22118ms step_avg:64.86ms +[2025-07-05 09:29:19] [Rank 0] step:341/10000 train_time:22118ms step_avg:64.86ms +[2025-07-05 09:29:21] [Rank 0] step:361/10000 train_time:23703ms step_avg:65.66ms +[2025-07-05 09:29:21] [Rank 0] step:361/10000 train_time:23703ms step_avg:65.66ms +[2025-07-05 09:29:22] [Rank 0] step:381/10000 train_time:24847ms step_avg:65.21ms +[2025-07-05 09:29:22] [Rank 0] step:381/10000 train_time:24847ms step_avg:65.21ms +[2025-07-05 09:29:23] [Rank 0] step:401/10000 train_time:26178ms step_avg:65.28ms +[2025-07-05 09:29:23] [Rank 0] step:401/10000 train_time:26178ms step_avg:65.28ms +[2025-07-05 09:29:25] [Rank 0] step:421/10000 train_time:27510ms step_avg:65.34ms 
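As a rough back-of-the-envelope throughput estimate from the timings above (not printed by the script itself): each step consumes train_seq_len tokens per rank, so with train_seq_len = 12288 and the 4-way world size implied by the validation batch math, one step processes 4 * 12288 = 49152 tokens; at the ~65 ms step_avg shown here that is on the order of 750k tokens per second.

tokens_per_step = 4 * 12288            # world_size * train_seq_len = 49152
step_time_s = 0.065                    # ~65 ms step_avg from the log above
print(tokens_per_step / step_time_s)   # ~756k tokens/s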
+[2025-07-05 09:29:25] [Rank 0] step:421/10000 train_time:27510ms step_avg:65.34ms +[2025-07-05 09:29:26] [Rank 0] step:441/10000 train_time:28840ms step_avg:65.40ms +[2025-07-05 09:29:26] [Rank 0] step:441/10000 train_time:28840ms step_avg:65.40ms +[2025-07-05 09:29:27] [Rank 0] step:461/10000 train_time:30172ms step_avg:65.45ms +[2025-07-05 09:29:27] [Rank 0] step:461/10000 train_time:30172ms step_avg:65.45ms +[2025-07-05 09:29:29] [Rank 0] step:481/10000 train_time:31505ms step_avg:65.50ms +[2025-07-05 09:29:29] [Rank 0] step:481/10000 train_time:31505ms step_avg:65.50ms +[2025-07-05 09:29:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:29:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:29:31] [Rank 0] PRINT: step:500/10000 train_loss:5.2584 val_loss:2.4469 train_time:33443ms step_avg:66.89ms +[2025-07-05 09:29:31] [Rank 0] PRINT: step:500/10000 train_loss:5.2584 val_loss:2.4469 train_time:33443ms step_avg:66.89ms +[2025-07-05 09:29:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:29:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bc447009ff7371bf56e3593bc302745b848f14c6 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "ac47ee40-8aa2-46af-9245-2ab46297d361", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50/training_log_ac47ee40-8aa2-46af-9245-2ab46297d361.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50/training_log_ac47ee40-8aa2-46af-9245-2ab46297d361.txt new file mode 100644 index 0000000000000000000000000000000000000000..5960e338f6686de73540c1f8be36f7441ed14ef8 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50/training_log_ac47ee40-8aa2-46af-9245-2ab46297d361.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:55:07] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:55:07 2025 --- +[2025-07-05 09:55:07] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:55:07 2025 --- +[2025-07-05 09:55:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:55:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 09:55:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:55:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:55:07] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:55:07] 
[Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:55:07] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50 +[2025-07-05 09:55:07] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_50 +[2025-07-05 09:55:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
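# A minimal sketch (illustrative only, not part of the logged script) of what the attention
# window schedule above produces, reusing next_multiple_of_n defined a few lines up and the
# logged num_iterations=10000: the window grows roughly linearly from 128 tokens at the start
# of training to 1792 tokens (1728 rounded up to a multiple of 128) at the end, and is passed
# to the model as a count of 128-token blocks.
def _window_blocks_sketch(step: int, num_iterations: int = 10000) -> int:
    x = min(max(step / num_iterations, 0.0), 1.0)
    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
    return window_size // 128
# step 0 -> 1 block (128 tokens), step 5000 -> 7 blocks (896 tokens), step 10000 -> 14 blocks (1792 tokens).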
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:55:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
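+# Note: run_dir_path / run_dir_path_str above are provisional defaults; on the master process
+# they are recomputed below from base_log_dir and the (mode, parameterization, lr, seed) settings,
+# and the directory is created on disk before the config and log files are written into it.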
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
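+# train_loader requests world_size * train_seq_len tokens per step; distributed_data_generator
+# then hands each rank only its local train_seq_len-token slice (plus one extra token for the
+# shifted targets), so every rank trains on its own contiguous chunk of the same shard.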
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:55:07] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:55:07] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:55:07] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:55:07] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:55:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:55:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:55:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:55:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:55:09] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:55:09] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:55:10] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:55:10] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:55:10] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:55:10] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:55:10] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:55:10] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:55:10] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:55:10] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:55:10] [Rank 0] PRINT: Model returns: +[2025-07-05 09:55:10] [Rank 0] PRINT: Model returns: +[2025-07-05 09:55:10] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:55:10] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:55:10] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:55:10] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:55:10] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:55:10] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 09:55:10] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:55:10] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:55:10] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:55:10] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:55:10] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:55:10] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:55:10] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:55:10] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:55:10] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:55:10] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:56:13] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:56:13] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:56:14] [Rank 0] PRINT: Starting training... +[2025-07-05 09:56:14] [Rank 0] PRINT: Starting training... +[2025-07-05 09:56:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:56:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
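The divisibility warning just above follows from integer division in the validation loop: with val_seq_len=65536 and four ranks, one validation pass consumes 262144 tokens, so only 7 full passes of the requested 1966080 tokens are run and the remaining half-batch is skipped. A quick check of that arithmetic, assuming world_size=4 as implied by the printed val_batch_size:

val_tokens = 1966080                             # from config.json above
val_seq_len = 65536
val_batch_size = 262144                          # world_size * val_seq_len, per the warning
world_size = val_batch_size // val_seq_len       # -> 4 (inferred, not printed directly)
val_num_steps = val_tokens // val_batch_size     # -> 7 validation batches actually run
skipped_tokens = val_tokens - val_num_steps * val_batch_size
print(world_size, val_num_steps, skipped_tokens) # 4 7 131072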
+[2025-07-05 09:56:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:56:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:56:22] [Rank 0] step:21/10000 train_time:822ms step_avg:39.12ms +[2025-07-05 09:56:22] [Rank 0] step:21/10000 train_time:822ms step_avg:39.12ms +[2025-07-05 09:56:24] [Rank 0] step:41/10000 train_time:2141ms step_avg:52.21ms +[2025-07-05 09:56:24] [Rank 0] step:41/10000 train_time:2141ms step_avg:52.21ms +[2025-07-05 09:56:25] [Rank 0] step:61/10000 train_time:3461ms step_avg:56.73ms +[2025-07-05 09:56:25] [Rank 0] step:61/10000 train_time:3461ms step_avg:56.73ms +[2025-07-05 09:56:26] [Rank 0] step:81/10000 train_time:4781ms step_avg:59.03ms +[2025-07-05 09:56:26] [Rank 0] step:81/10000 train_time:4781ms step_avg:59.03ms +[2025-07-05 09:56:28] [Rank 0] step:101/10000 train_time:6102ms step_avg:60.42ms +[2025-07-05 09:56:28] [Rank 0] step:101/10000 train_time:6102ms step_avg:60.42ms +[2025-07-05 09:56:29] [Rank 0] step:121/10000 train_time:7421ms step_avg:61.33ms +[2025-07-05 09:56:29] [Rank 0] step:121/10000 train_time:7421ms step_avg:61.33ms +[2025-07-05 09:56:30] [Rank 0] step:141/10000 train_time:8745ms step_avg:62.02ms +[2025-07-05 09:56:30] [Rank 0] step:141/10000 train_time:8745ms step_avg:62.02ms +[2025-07-05 09:56:32] [Rank 0] step:161/10000 train_time:10069ms step_avg:62.54ms +[2025-07-05 09:56:32] [Rank 0] step:161/10000 train_time:10069ms step_avg:62.54ms +[2025-07-05 09:56:33] [Rank 0] step:181/10000 train_time:11443ms step_avg:63.22ms +[2025-07-05 09:56:33] [Rank 0] step:181/10000 train_time:11443ms step_avg:63.22ms +[2025-07-05 09:56:34] [Rank 0] step:201/10000 train_time:12791ms step_avg:63.64ms +[2025-07-05 09:56:34] [Rank 0] step:201/10000 train_time:12791ms step_avg:63.64ms +[2025-07-05 09:56:36] [Rank 0] step:221/10000 train_time:14123ms step_avg:63.91ms +[2025-07-05 09:56:36] [Rank 0] step:221/10000 train_time:14123ms step_avg:63.91ms +[2025-07-05 09:56:37] [Rank 0] step:241/10000 train_time:15455ms step_avg:64.13ms +[2025-07-05 09:56:37] [Rank 0] step:241/10000 train_time:15455ms step_avg:64.13ms +[2025-07-05 09:56:38] [Rank 0] step:261/10000 train_time:16788ms step_avg:64.32ms +[2025-07-05 09:56:38] [Rank 0] step:261/10000 train_time:16788ms step_avg:64.32ms +[2025-07-05 09:56:40] [Rank 0] step:281/10000 train_time:18121ms step_avg:64.49ms +[2025-07-05 09:56:40] [Rank 0] step:281/10000 train_time:18121ms step_avg:64.49ms +[2025-07-05 09:56:41] [Rank 0] step:301/10000 train_time:19456ms step_avg:64.64ms +[2025-07-05 09:56:41] [Rank 0] step:301/10000 train_time:19456ms step_avg:64.64ms +[2025-07-05 09:56:42] [Rank 0] step:321/10000 train_time:20789ms step_avg:64.76ms +[2025-07-05 09:56:42] [Rank 0] step:321/10000 train_time:20789ms step_avg:64.76ms +[2025-07-05 09:56:44] [Rank 0] step:341/10000 train_time:22123ms step_avg:64.88ms +[2025-07-05 09:56:44] [Rank 0] step:341/10000 train_time:22123ms step_avg:64.88ms +[2025-07-05 09:56:45] [Rank 0] step:361/10000 train_time:23508ms step_avg:65.12ms +[2025-07-05 09:56:45] [Rank 0] step:361/10000 train_time:23508ms step_avg:65.12ms +[2025-07-05 09:56:46] [Rank 0] step:381/10000 train_time:24860ms step_avg:65.25ms +[2025-07-05 09:56:46] [Rank 0] step:381/10000 train_time:24860ms step_avg:65.25ms +[2025-07-05 09:56:48] [Rank 0] step:401/10000 train_time:26197ms step_avg:65.33ms +[2025-07-05 09:56:48] [Rank 0] step:401/10000 train_time:26197ms step_avg:65.33ms +[2025-07-05 09:56:49] [Rank 0] step:421/10000 train_time:27531ms step_avg:65.39ms 
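The step_avg column in the timing lines above is simply the printed train_time divided by the printed step index (per the logging call in the training section); the small discrepancies come from train_time being rounded to whole milliseconds. A quick reconciliation using values copied from the log:

for step, train_time_ms in [(21, 822), (41, 2141), (61, 3461), (421, 27531)]:
    print(step, round(train_time_ms / step, 2))
# -> 39.14, 52.22, 56.74, 65.39 ms, matching the logged step_avg values to within rounding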
+[2025-07-05 09:56:49] [Rank 0] step:421/10000 train_time:27531ms step_avg:65.39ms +[2025-07-05 09:56:50] [Rank 0] step:441/10000 train_time:28864ms step_avg:65.45ms +[2025-07-05 09:56:50] [Rank 0] step:441/10000 train_time:28864ms step_avg:65.45ms +[2025-07-05 09:56:52] [Rank 0] step:461/10000 train_time:30199ms step_avg:65.51ms +[2025-07-05 09:56:52] [Rank 0] step:461/10000 train_time:30199ms step_avg:65.51ms +[2025-07-05 09:56:53] [Rank 0] step:481/10000 train_time:31533ms step_avg:65.56ms +[2025-07-05 09:56:53] [Rank 0] step:481/10000 train_time:31533ms step_avg:65.56ms +[2025-07-05 09:56:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:56:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:56:55] [Rank 0] PRINT: step:500/10000 train_loss:5.3608 val_loss:2.5873 train_time:33477ms step_avg:66.95ms +[2025-07-05 09:56:55] [Rank 0] PRINT: step:500/10000 train_loss:5.3608 val_loss:2.5873 train_time:33477ms step_avg:66.95ms +[2025-07-05 09:56:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:56:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a5a2dbbf0d3da8d91bbf756dd3fd05010041d3dd --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "df914990-e9e0-4938-bd86-82b27524e6dc", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51/training_log_df914990-e9e0-4938-bd86-82b27524e6dc.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51/training_log_df914990-e9e0-4938-bd86-82b27524e6dc.txt new file mode 100644 index 0000000000000000000000000000000000000000..d2bd51b512456408bd90b18584a30968ed7fd94c --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51/training_log_df914990-e9e0-4938-bd86-82b27524e6dc.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:22:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:22:31 2025 --- +[2025-07-05 10:22:31] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:22:31 2025 --- +[2025-07-05 10:22:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 10:22:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-05 10:22:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:22:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:22:31] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:22:31] 
[Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:22:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51 +[2025-07-05 10:22:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_51 +[2025-07-05 10:22:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
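For reference, the sliding-window schedule defined above (get_window_size_blocks together with next_multiple_of_n) grows the attention window linearly with training progress, rounded up to a multiple of 128 tokens and floored at 128, and reports it in 128-token blocks. A compact restatement, assuming num_iterations=10000 as in this run; illustrative only, not part of the committed script:

import math

def window_blocks(step: int, num_iterations: int = 10000) -> int:
    # Fraction of training completed, clamped to [0, 1] as in the script above.
    x = min(max(step / num_iterations, 0.0), 1.0)
    # Target window grows linearly toward 1728 tokens, rounded up to a multiple of 128.
    window = max(128, math.ceil(1728 * x / 128) * 128)
    return window // 128

# step 0     -> 1 block  (128-token window)
# step 5000  -> 7 blocks (896-token window)
# step 10000 -> 14 blocks (1792-token window)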
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:22:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
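+# Example launch for this configuration (hypothetical command line; the actual
+# script filename is not shown in this log). torchrun sets the RANK, LOCAL_RANK
+# and WORLD_SIZE environment variables that are read above, e.g.:
+#   torchrun --standalone --nproc_per_node=4 <this_script>.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42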
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
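+# Worked example for the power-law evaluation groups used in the loop below
+# (from generate_powerlaw_selection_counts with m=11; illustrative only):
+# group 0 has 1 class with 2**11 = 2048 samples, and each group g >= 1 has
+# 2**(g-1) classes with 2**(11-g) samples per class, i.e. 2**10 = 1024 samples
+# per group and 2**11 = 2048 classes in total.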
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:22:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:22:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:22:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:22:32] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:22:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:22:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:22:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:22:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:22:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:22:34] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:22:35] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:22:35] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:22:35] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:22:35] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:22:35] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:22:35] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:22:35] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:22:35] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:22:35] [Rank 0] PRINT: Model returns: +[2025-07-05 10:22:35] [Rank 0] PRINT: Model returns: +[2025-07-05 10:22:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:22:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:22:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:22:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:22:35] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 10:22:35] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-05 10:22:35] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:22:35] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:22:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:22:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:22:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:22:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:22:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:22:35] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:22:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:22:35] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:23:42] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:23:42] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:23:42] [Rank 0] PRINT: Starting training... +[2025-07-05 10:23:42] [Rank 0] PRINT: Starting training... +[2025-07-05 10:23:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:23:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:23:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:23:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:23:51] [Rank 0] step:21/10000 train_time:1026ms step_avg:48.85ms +[2025-07-05 10:23:51] [Rank 0] step:21/10000 train_time:1026ms step_avg:48.85ms +[2025-07-05 10:23:53] [Rank 0] step:41/10000 train_time:2349ms step_avg:57.29ms +[2025-07-05 10:23:53] [Rank 0] step:41/10000 train_time:2349ms step_avg:57.29ms +[2025-07-05 10:23:54] [Rank 0] step:61/10000 train_time:3671ms step_avg:60.18ms +[2025-07-05 10:23:54] [Rank 0] step:61/10000 train_time:3671ms step_avg:60.18ms +[2025-07-05 10:23:55] [Rank 0] step:81/10000 train_time:4994ms step_avg:61.66ms +[2025-07-05 10:23:55] [Rank 0] step:81/10000 train_time:4994ms step_avg:61.66ms +[2025-07-05 10:23:57] [Rank 0] step:101/10000 train_time:6318ms step_avg:62.56ms +[2025-07-05 10:23:57] [Rank 0] step:101/10000 train_time:6318ms step_avg:62.56ms +[2025-07-05 10:23:58] [Rank 0] step:121/10000 train_time:7642ms step_avg:63.16ms +[2025-07-05 10:23:58] [Rank 0] step:121/10000 train_time:7642ms step_avg:63.16ms +[2025-07-05 10:23:59] [Rank 0] step:141/10000 train_time:8966ms step_avg:63.59ms +[2025-07-05 10:23:59] [Rank 0] step:141/10000 train_time:8966ms step_avg:63.59ms +[2025-07-05 10:24:00] [Rank 0] step:161/10000 train_time:10291ms step_avg:63.92ms +[2025-07-05 10:24:00] [Rank 0] step:161/10000 train_time:10291ms step_avg:63.92ms +[2025-07-05 10:24:02] [Rank 0] step:181/10000 train_time:12283ms step_avg:67.86ms +[2025-07-05 10:24:02] [Rank 0] step:181/10000 train_time:12283ms step_avg:67.86ms +[2025-07-05 10:24:03] [Rank 0] step:201/10000 train_time:13000ms step_avg:64.68ms +[2025-07-05 10:24:03] [Rank 0] step:201/10000 train_time:13000ms step_avg:64.68ms +[2025-07-05 10:24:05] [Rank 0] step:221/10000 train_time:14332ms step_avg:64.85ms +[2025-07-05 10:24:05] [Rank 0] step:221/10000 train_time:14332ms step_avg:64.85ms +[2025-07-05 10:24:06] [Rank 0] step:241/10000 train_time:15663ms step_avg:64.99ms +[2025-07-05 10:24:06] [Rank 0] step:241/10000 train_time:15663ms step_avg:64.99ms +[2025-07-05 10:24:07] [Rank 0] step:261/10000 train_time:16998ms step_avg:65.13ms +[2025-07-05 10:24:07] [Rank 0] step:261/10000 train_time:16998ms step_avg:65.13ms +[2025-07-05 10:24:09] [Rank 0] step:281/10000 train_time:18333ms step_avg:65.24ms +[2025-07-05 10:24:09] [Rank 0] step:281/10000 train_time:18333ms step_avg:65.24ms +[2025-07-05 10:24:10] [Rank 0] step:301/10000 train_time:19668ms step_avg:65.34ms +[2025-07-05 10:24:10] [Rank 0] step:301/10000 train_time:19668ms step_avg:65.34ms +[2025-07-05 10:24:11] [Rank 0] step:321/10000 train_time:21001ms step_avg:65.42ms +[2025-07-05 10:24:11] [Rank 0] step:321/10000 train_time:21001ms step_avg:65.42ms +[2025-07-05 10:24:13] [Rank 0] step:341/10000 train_time:22335ms step_avg:65.50ms +[2025-07-05 10:24:13] [Rank 0] step:341/10000 train_time:22335ms step_avg:65.50ms +[2025-07-05 10:24:14] [Rank 0] step:361/10000 train_time:24348ms step_avg:67.45ms +[2025-07-05 10:24:14] [Rank 0] step:361/10000 train_time:24348ms step_avg:67.45ms +[2025-07-05 10:24:15] [Rank 0] step:381/10000 train_time:25065ms step_avg:65.79ms +[2025-07-05 10:24:15] [Rank 0] step:381/10000 train_time:25065ms step_avg:65.79ms +[2025-07-05 10:24:17] [Rank 0] step:401/10000 train_time:26399ms step_avg:65.83ms +[2025-07-05 10:24:17] [Rank 0] step:401/10000 train_time:26399ms step_avg:65.83ms +[2025-07-05 10:24:18] [Rank 0] step:421/10000 train_time:27734ms step_avg:65.88ms 
+[2025-07-05 10:24:18] [Rank 0] step:421/10000 train_time:27734ms step_avg:65.88ms +[2025-07-05 10:24:19] [Rank 0] step:441/10000 train_time:29069ms step_avg:65.92ms +[2025-07-05 10:24:19] [Rank 0] step:441/10000 train_time:29069ms step_avg:65.92ms +[2025-07-05 10:24:21] [Rank 0] step:461/10000 train_time:30404ms step_avg:65.95ms +[2025-07-05 10:24:21] [Rank 0] step:461/10000 train_time:30404ms step_avg:65.95ms +[2025-07-05 10:24:22] [Rank 0] step:481/10000 train_time:31739ms step_avg:65.99ms +[2025-07-05 10:24:22] [Rank 0] step:481/10000 train_time:31739ms step_avg:65.99ms +[2025-07-05 10:24:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:24:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:24:24] [Rank 0] PRINT: step:500/10000 train_loss:5.2776 val_loss:2.4798 train_time:33681ms step_avg:67.36ms +[2025-07-05 10:24:24] [Rank 0] PRINT: step:500/10000 train_loss:5.2776 val_loss:2.4798 train_time:33681ms step_avg:67.36ms +[2025-07-05 10:24:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:24:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f4ed07a19f37584c374f5082142e4a1f843aa0b2 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "0815f9b9-887a-40c5-8af3-3e86de43c33b", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/training_log_0815f9b9-887a-40c5-8af3-3e86de43c33b.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/training_log_0815f9b9-887a-40c5-8af3-3e86de43c33b.txt new file mode 100644 index 0000000000000000000000000000000000000000..3162d5f895c43c541fab6ce74a8d08c23df96b6e --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/training_log_0815f9b9-887a-40c5-8af3-3e86de43c33b.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:22:06] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:22:06 2025 --- +[2025-07-05 08:22:06] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:22:06 2025 --- +[2025-07-05 08:22:06] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:22:06] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:22:06] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:22:06] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:22:06] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:22:06] 
[Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:22:06] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42 +[2025-07-05 08:22:06] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42 +[2025-07-05 08:22:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:22:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
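+# A minimal sketch (not an identifier used elsewhere in this script) of the
+# optimizer_mode -> parameter split described in the --optimizer_mode help text
+# above and implemented in the optimizer setup further below. The group labels
+# "qk", "vo", "mlp_w1", "mlp_w2" are illustrative shorthand for the Q/K weights,
+# the V/output-projection weights, and the two MLP matrices; in every mode the
+# embedding, lm_head and scalar parameters are handled by Adam.
+def sketch_mode_split(mode: int):
+    """Return (muon_groups, adam_matrix_groups) for a given optimizer_mode."""
+    table = {
+        0: (["qk", "vo", "mlp_w1", "mlp_w2"], []),
+        1: (["qk"], ["vo", "mlp_w1", "mlp_w2"]),
+        2: (["vo"], ["qk", "mlp_w1", "mlp_w2"]),
+        3: (["qk", "vo"], ["mlp_w1", "mlp_w2"]),
+        4: (["mlp_w1", "mlp_w2"], ["qk", "vo"]),
+        5: ([], ["qk", "vo", "mlp_w1", "mlp_w2"]),
+        6: (["mlp_w2"], ["qk", "vo", "mlp_w1"]),
+        7: (["vo", "mlp_w1", "mlp_w2"], ["qk"]),
+        8: (["vo", "mlp_w2"], ["qk", "mlp_w1"]),
+    }
+    return table[mode]
+# e.g. sketch_mode_split(5) -> ([], ["qk", "vo", "mlp_w1", "mlp_w2"]): every matrix
+# goes to Adam, matching the "Mode 5: All Adam" branch in the optimizer setup below.
+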
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
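+        # Worked example (illustrative names): for a cleaned item such as
+        # "What city was Alice born in? Paris", the split below yields
+        # prompt = "What city was Alice born in?", answer = "Paris", and
+        # expected_token = the first GPT-2 token of " Paris" (leading space kept
+        # so the token matches how the answer appears after the question in training text).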
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
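+# Illustrative token accounting (assuming world_size = 4, as implied by the
+# "val_batch_size (262144)" warning that appears later in this log):
+#   tokens per training step   = world_size * train_seq_len = 4 * 12288  = 49152
+#   tokens per validation step = world_size * val_seq_len   = 4 * 65536  = 262144
+#   val_num_steps = val_tokens // val_batch_size = 1966080 // 262144 = 7,
+#   leaving 131072 tokens unused per evaluation, which is what triggers the
+#   "not perfectly divisible" warning printed in the validation section below.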
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:22:06] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:22:06] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:22:06] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:22:06] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:22:08] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:22:08] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:22:08] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:22:08] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:22:08] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:22:08] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:22:09] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:22:09] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:22:09] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:22:09] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:22:09] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:22:09] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:22:09] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:22:09] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:22:09] [Rank 0] PRINT: Model returns: +[2025-07-05 08:22:09] [Rank 0] PRINT: Model returns: +[2025-07-05 08:22:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:22:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:22:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:22:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:22:09] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 08:22:09] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 08:22:09] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:22:09] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:22:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:22:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:22:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:22:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:22:09] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:22:09] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:22:09] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:22:09] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:23:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:23:12] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:23:12] [Rank 0] PRINT: Starting training... +[2025-07-05 08:23:12] [Rank 0] PRINT: Starting training... +[2025-07-05 08:23:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:23:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:23:19] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:23:21] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.38ms
+[2025-07-05 08:23:22] [Rank 0] step:41/10000 train_time:2330ms step_avg:56.83ms
+[2025-07-05 08:23:24] [Rank 0] step:61/10000 train_time:3647ms step_avg:59.79ms
+[2025-07-05 08:23:25] [Rank 0] step:81/10000 train_time:4967ms step_avg:61.32ms
+[2025-07-05 08:23:26] [Rank 0] step:101/10000 train_time:6289ms step_avg:62.27ms
+[2025-07-05 08:23:27] [Rank 0] step:121/10000 train_time:7615ms step_avg:62.93ms
+[2025-07-05 08:23:29] [Rank 0] step:141/10000 train_time:8941ms step_avg:63.41ms
+[2025-07-05 08:23:30] [Rank 0] step:161/10000 train_time:10270ms step_avg:63.79ms
+[2025-07-05 08:23:31] [Rank 0] step:181/10000 train_time:11853ms step_avg:65.49ms
+[2025-07-05 08:23:33] [Rank 0] step:201/10000 train_time:12930ms step_avg:64.33ms
+[2025-07-05 08:23:34] [Rank 0] step:221/10000 train_time:14259ms step_avg:64.52ms
+[2025-07-05 08:23:35] [Rank 0] step:241/10000 train_time:15587ms step_avg:64.67ms
+[2025-07-05 08:23:37] [Rank 0] step:261/10000 train_time:16915ms step_avg:64.81ms
+[2025-07-05 08:23:38] [Rank 0] step:281/10000 train_time:18242ms step_avg:64.92ms
+[2025-07-05 08:23:39] [Rank 0] step:301/10000 train_time:19573ms step_avg:65.03ms
+[2025-07-05 08:23:41] [Rank 0] step:321/10000 train_time:20904ms step_avg:65.12ms
+[2025-07-05 08:23:42] [Rank 0] step:341/10000 train_time:22234ms step_avg:65.20ms
+[2025-07-05 08:23:44] [Rank 0] step:361/10000 train_time:24242ms step_avg:67.15ms
+[2025-07-05 08:23:45] [Rank 0] step:381/10000 train_time:24961ms step_avg:65.51ms
+[2025-07-05 08:23:46] [Rank 0] step:401/10000 train_time:26295ms step_avg:65.57ms
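The step_avg column in these lines is simply cumulative wall-clock training time divided by the number of optimizer steps taken so far; a minimal sketch reproducing the step:401 entry above, with values copied from the log:

# step_avg as printed by the training loop: train_time / (step + 1)
train_time_ms = 26295        # cumulative time at the "step:401" line
steps_done = 401             # the loop prints step + 1
print(f"step_avg:{train_time_ms / max(1, steps_done):.2f}ms")  # -> step_avg:65.57ms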
+[2025-07-05 08:23:48] [Rank 0] step:421/10000 train_time:27626ms step_avg:65.62ms +[2025-07-05 08:23:49] [Rank 0] step:441/10000 train_time:28959ms step_avg:65.67ms +[2025-07-05 08:23:49] [Rank 0] step:441/10000 train_time:28959ms step_avg:65.67ms +[2025-07-05 08:23:50] [Rank 0] step:461/10000 train_time:30292ms step_avg:65.71ms +[2025-07-05 08:23:50] [Rank 0] step:461/10000 train_time:30292ms step_avg:65.71ms +[2025-07-05 08:23:52] [Rank 0] step:481/10000 train_time:31625ms step_avg:65.75ms +[2025-07-05 08:23:52] [Rank 0] step:481/10000 train_time:31625ms step_avg:65.75ms +[2025-07-05 08:23:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:23:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:23:54] [Rank 0] PRINT: step:500/10000 train_loss:3.8946 val_loss:1.8779 train_time:33563ms step_avg:67.13ms +[2025-07-05 08:23:54] [Rank 0] PRINT: step:500/10000 train_loss:3.8946 val_loss:1.8779 train_time:33563ms step_avg:67.13ms +[2025-07-05 08:23:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:23:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a6f4d69056459c56c8266106473182d8d28a2cb4 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "a4330c92-3eb3-45dc-8522-41fcc8214e8e", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43/training_log_a4330c92-3eb3-45dc-8522-41fcc8214e8e.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43/training_log_a4330c92-3eb3-45dc-8522-41fcc8214e8e.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef8a39c5eaebcea6e1296d0d6e821bee47f87c40 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43/training_log_a4330c92-3eb3-45dc-8522-41fcc8214e8e.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:49:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:49:18 2025 --- +[2025-07-05 08:49:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:49:18 2025 --- +[2025-07-05 08:49:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:49:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:49:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:49:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:49:19] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:49:19] 
[Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:49:19] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43 +[2025-07-05 08:49:19] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_43 +[2025-07-05 08:49:19] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:49:19] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
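+# The master process rebuilds run_dir_path under base_log_dir below (folder name
+# mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed}) and
+# saves the parsed CLI args and hyperparameters to config.json inside that folder.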
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
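+            # Illustrative example (hypothetical values): a cleaned item such as
+            # "Where was Alice Johnson born? Paris, France" would be split into
+            # prompt = "Where was Alice Johnson born?" and answer = "Paris, France".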
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:49:19] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:49:19] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:49:21] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:49:21] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:49:21] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:49:22] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 08:49:22] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:49:22] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:49:22] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:49:22] [Rank 0] PRINT: Model returns: 
+[2025-07-05 08:49:22] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:49:22] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 08:49:22] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002).
+[2025-07-05 08:49:22] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 08:49:22] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 08:49:22] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:49:22] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:49:22] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:50:25] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:50:25] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:50:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:50:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:50:34] [Rank 0] step:21/10000 train_time:813ms step_avg:38.73ms
+[2025-07-05 08:50:35] [Rank 0] step:41/10000 train_time:2128ms step_avg:51.89ms
+[2025-07-05 08:50:37] [Rank 0] step:61/10000 train_time:3446ms step_avg:56.49ms
+[2025-07-05 08:50:38] [Rank 0] step:81/10000 train_time:4766ms step_avg:58.83ms
+[2025-07-05 08:50:39] [Rank 0] step:101/10000 train_time:6191ms step_avg:61.30ms
+[2025-07-05 08:50:41] [Rank 0] step:121/10000 train_time:7515ms step_avg:62.11ms
+[2025-07-05 08:50:42] [Rank 0] step:141/10000 train_time:8843ms step_avg:62.72ms
+[2025-07-05 08:50:43] [Rank 0] step:161/10000 train_time:10171ms step_avg:63.17ms
+[2025-07-05 08:50:45] [Rank 0] step:181/10000 train_time:12177ms step_avg:67.28ms
+[2025-07-05 08:50:46] [Rank 0] step:201/10000 train_time:12893ms step_avg:64.14ms
+[2025-07-05 08:50:47] [Rank 0] step:221/10000 train_time:14221ms step_avg:64.35ms
+[2025-07-05 08:50:49] [Rank 0] step:241/10000 train_time:15548ms step_avg:64.52ms
+[2025-07-05 08:50:50] [Rank 0] step:261/10000 train_time:16877ms step_avg:64.66ms
+[2025-07-05 08:50:51] [Rank 0] step:281/10000 train_time:18207ms step_avg:64.79ms
+[2025-07-05 08:50:53] [Rank 0] step:301/10000 train_time:19553ms step_avg:64.96ms
+[2025-07-05 08:50:54] [Rank 0] step:321/10000 train_time:20969ms step_avg:65.33ms
+[2025-07-05 08:50:55] [Rank 0] step:341/10000 train_time:22300ms step_avg:65.40ms
+[2025-07-05 08:50:57] [Rank 0] step:361/10000 train_time:23986ms step_avg:66.44ms
+[2025-07-05 08:50:58] [Rank 0] step:381/10000 train_time:25118ms step_avg:65.93ms
+[2025-07-05 08:51:00] [Rank 0] step:401/10000 train_time:26549ms step_avg:66.21ms
+[2025-07-05 08:51:01] [Rank 0] step:421/10000 train_time:27882ms step_avg:66.23ms
+[2025-07-05 08:51:01] [Rank 0] step:421/10000 train_time:27882ms step_avg:66.23ms +[2025-07-05 08:51:02] [Rank 0] step:441/10000 train_time:29215ms step_avg:66.25ms +[2025-07-05 08:51:02] [Rank 0] step:441/10000 train_time:29215ms step_avg:66.25ms +[2025-07-05 08:51:04] [Rank 0] step:461/10000 train_time:30547ms step_avg:66.26ms +[2025-07-05 08:51:04] [Rank 0] step:461/10000 train_time:30547ms step_avg:66.26ms +[2025-07-05 08:51:05] [Rank 0] step:481/10000 train_time:31877ms step_avg:66.27ms +[2025-07-05 08:51:05] [Rank 0] step:481/10000 train_time:31877ms step_avg:66.27ms +[2025-07-05 08:51:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:51:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:51:07] [Rank 0] PRINT: step:500/10000 train_loss:3.9254 val_loss:1.9033 train_time:33817ms step_avg:67.63ms +[2025-07-05 08:51:07] [Rank 0] PRINT: step:500/10000 train_loss:3.9254 val_loss:1.9033 train_time:33817ms step_avg:67.63ms +[2025-07-05 08:51:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:51:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e291ccfe280371b3db41218652953665c84a0f47 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "1db592eb-10b5-42c3-9bf5-6726ea9a0e1d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44/training_log_1db592eb-10b5-42c3-9bf5-6726ea9a0e1d.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44/training_log_1db592eb-10b5-42c3-9bf5-6726ea9a0e1d.txt new file mode 100644 index 0000000000000000000000000000000000000000..c9681df62db8e5e60a0120462c572374f535eb9d --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44/training_log_1db592eb-10b5-42c3-9bf5-6726ea9a0e1d.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:16:52] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:16:52 2025 --- +[2025-07-05 09:16:52] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:16:52 2025 --- +[2025-07-05 09:16:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:16:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:16:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:16:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:16:52] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:16:52] 
[Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:16:52] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44 +[2025-07-05 09:16:52] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_44 +[2025-07-05 09:16:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
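# Aside: illustrative values for the get_lr schedule defined above, assuming the logged
# num_iterations = 10000 and cooldown_frac = 0.8 (so the cooldown starts at x = 0.2):
#   step  1000: x = 0.10 < 0.20              -> multiplier 1.0
#   step  6000: x = 0.60, w = 0.40/0.8 = 0.5 -> multiplier 0.5*1.0 + 0.5*0.1 = 0.55
#   step 10000: x = 1.00, w = 0.0            -> multiplier 0.1
# i.e. the base learning rates are held constant for the first 20% of training and then decay linearly to 10%.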
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:16:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:16:52] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:16:52] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:16:52] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:16:52] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:16:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:16:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:16:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:16:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:16:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:16:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:16:55] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:16:55] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:16:55] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:16:55] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:16:55] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:16:55] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:16:55] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:16:55] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:16:55] [Rank 0] PRINT: Model returns: +[2025-07-05 09:16:55] [Rank 0] PRINT: Model returns: +[2025-07-05 09:16:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:16:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:16:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:16:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:16:55] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:16:55] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:16:55] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:16:55] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:16:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:16:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:16:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:16:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:16:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:16:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:16:55] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:16:55] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:18:00] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:18:00] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:18:00] [Rank 0] PRINT: Starting training... +[2025-07-05 09:18:00] [Rank 0] PRINT: Starting training... +[2025-07-05 09:18:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:18:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
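The divisibility warning above follows directly from the evaluation batch sizing in the logged script: val_batch_size = world_size * val_seq_len and val_num_steps = val_tokens // val_batch_size, so any remainder of val_tokens modulo val_batch_size is silently skipped each time validation runs. A minimal sketch of that arithmetic, assuming world_size = 4 (inferred from the logged val_batch_size of 262144 together with val_seq_len = 65536 from the Hyperparameters dataclass; not stated explicitly in the log):

val_tokens = 1_966_080
val_seq_len = 4 * 16 * 1024                      # 65536, per the Hyperparameters dataclass
world_size = 4                                   # assumption inferred from the logged batch size
val_batch_size = world_size * val_seq_len        # 262144, matches the warning message
val_num_steps = val_tokens // val_batch_size     # 7 full validation steps
skipped = val_tokens - val_num_steps * val_batch_size  # 131072 tokens never evaluated
print(val_batch_size, val_num_steps, skipped)

So under these assumptions the last half-batch (131072 tokens) of the configured validation budget is dropped on every validation pass, which is what the warning is flagging.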
+[2025-07-05 09:18:08] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:18:08] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:18:09] [Rank 0] step:21/10000 train_time:811ms step_avg:38.61ms +[2025-07-05 09:18:09] [Rank 0] step:21/10000 train_time:811ms step_avg:38.61ms +[2025-07-05 09:18:11] [Rank 0] step:41/10000 train_time:2126ms step_avg:51.85ms +[2025-07-05 09:18:11] [Rank 0] step:41/10000 train_time:2126ms step_avg:51.85ms +[2025-07-05 09:18:12] [Rank 0] step:61/10000 train_time:3445ms step_avg:56.47ms +[2025-07-05 09:18:12] [Rank 0] step:61/10000 train_time:3445ms step_avg:56.47ms +[2025-07-05 09:18:13] [Rank 0] step:81/10000 train_time:4765ms step_avg:58.83ms +[2025-07-05 09:18:13] [Rank 0] step:81/10000 train_time:4765ms step_avg:58.83ms +[2025-07-05 09:18:15] [Rank 0] step:101/10000 train_time:6088ms step_avg:60.28ms +[2025-07-05 09:18:15] [Rank 0] step:101/10000 train_time:6088ms step_avg:60.28ms +[2025-07-05 09:18:16] [Rank 0] step:121/10000 train_time:7413ms step_avg:61.27ms +[2025-07-05 09:18:16] [Rank 0] step:121/10000 train_time:7413ms step_avg:61.27ms +[2025-07-05 09:18:17] [Rank 0] step:141/10000 train_time:8993ms step_avg:63.78ms +[2025-07-05 09:18:17] [Rank 0] step:141/10000 train_time:8993ms step_avg:63.78ms +[2025-07-05 09:18:19] [Rank 0] step:161/10000 train_time:10143ms step_avg:63.00ms +[2025-07-05 09:18:19] [Rank 0] step:161/10000 train_time:10143ms step_avg:63.00ms +[2025-07-05 09:18:20] [Rank 0] step:181/10000 train_time:11519ms step_avg:63.64ms +[2025-07-05 09:18:20] [Rank 0] step:181/10000 train_time:11519ms step_avg:63.64ms +[2025-07-05 09:18:21] [Rank 0] step:201/10000 train_time:12798ms step_avg:63.67ms +[2025-07-05 09:18:21] [Rank 0] step:201/10000 train_time:12798ms step_avg:63.67ms +[2025-07-05 09:18:23] [Rank 0] step:221/10000 train_time:14125ms step_avg:63.91ms +[2025-07-05 09:18:23] [Rank 0] step:221/10000 train_time:14125ms step_avg:63.91ms +[2025-07-05 09:18:24] [Rank 0] step:241/10000 train_time:15454ms step_avg:64.12ms +[2025-07-05 09:18:24] [Rank 0] step:241/10000 train_time:15454ms step_avg:64.12ms +[2025-07-05 09:18:25] [Rank 0] step:261/10000 train_time:16784ms step_avg:64.31ms +[2025-07-05 09:18:25] [Rank 0] step:261/10000 train_time:16784ms step_avg:64.31ms +[2025-07-05 09:18:27] [Rank 0] step:281/10000 train_time:18115ms step_avg:64.47ms +[2025-07-05 09:18:27] [Rank 0] step:281/10000 train_time:18115ms step_avg:64.47ms +[2025-07-05 09:18:28] [Rank 0] step:301/10000 train_time:19445ms step_avg:64.60ms +[2025-07-05 09:18:28] [Rank 0] step:301/10000 train_time:19445ms step_avg:64.60ms +[2025-07-05 09:18:29] [Rank 0] step:321/10000 train_time:20776ms step_avg:64.72ms +[2025-07-05 09:18:29] [Rank 0] step:321/10000 train_time:20776ms step_avg:64.72ms +[2025-07-05 09:18:31] [Rank 0] step:341/10000 train_time:22107ms step_avg:64.83ms +[2025-07-05 09:18:31] [Rank 0] step:341/10000 train_time:22107ms step_avg:64.83ms +[2025-07-05 09:18:32] [Rank 0] step:361/10000 train_time:23487ms step_avg:65.06ms +[2025-07-05 09:18:32] [Rank 0] step:361/10000 train_time:23487ms step_avg:65.06ms +[2025-07-05 09:18:33] [Rank 0] step:381/10000 train_time:24830ms step_avg:65.17ms +[2025-07-05 09:18:33] [Rank 0] step:381/10000 train_time:24830ms step_avg:65.17ms +[2025-07-05 09:18:35] [Rank 0] step:401/10000 train_time:26162ms step_avg:65.24ms +[2025-07-05 09:18:35] [Rank 0] step:401/10000 train_time:26162ms step_avg:65.24ms +[2025-07-05 09:18:36] [Rank 0] step:421/10000 train_time:27494ms step_avg:65.31ms 
+[2025-07-05 09:18:36] [Rank 0] step:421/10000 train_time:27494ms step_avg:65.31ms +[2025-07-05 09:18:37] [Rank 0] step:441/10000 train_time:28824ms step_avg:65.36ms +[2025-07-05 09:18:37] [Rank 0] step:441/10000 train_time:28824ms step_avg:65.36ms +[2025-07-05 09:18:39] [Rank 0] step:461/10000 train_time:30155ms step_avg:65.41ms +[2025-07-05 09:18:39] [Rank 0] step:461/10000 train_time:30155ms step_avg:65.41ms +[2025-07-05 09:18:40] [Rank 0] step:481/10000 train_time:31487ms step_avg:65.46ms +[2025-07-05 09:18:40] [Rank 0] step:481/10000 train_time:31487ms step_avg:65.46ms +[2025-07-05 09:18:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:18:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:18:42] [Rank 0] PRINT: step:500/10000 train_loss:3.8577 val_loss:1.9033 train_time:33424ms step_avg:66.85ms +[2025-07-05 09:18:42] [Rank 0] PRINT: step:500/10000 train_loss:3.8577 val_loss:1.9033 train_time:33424ms step_avg:66.85ms +[2025-07-05 09:18:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:18:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..132cdb8f238af732be5be342c985056106e5bfb3 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "fdf8f694-757a-49a7-b180-e8599635c8bd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/training_log_fdf8f694-757a-49a7-b180-e8599635c8bd.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/training_log_fdf8f694-757a-49a7-b180-e8599635c8bd.txt new file mode 100644 index 0000000000000000000000000000000000000000..cfc4339cb5ad0a5a730aa702ae6399edc97574e3 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/training_log_fdf8f694-757a-49a7-b180-e8599635c8bd.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:43:54] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:43:54 2025 --- +[2025-07-05 09:43:54] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:43:54 2025 --- +[2025-07-05 09:43:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:43:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:43:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:43:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:43:54] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:43:54] 
[Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:43:54] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45 +[2025-07-05 09:43:54] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45 +[2025-07-05 09:43:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
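+# Note on the two schedules defined earlier in this script (an illustrative
+# reading of the code, using the hyperparameters logged for this run): get_lr
+# holds the LR multiplier at 1.0 for the first (1 - cooldown_frac) = 20% of
+# steps and then decays it linearly to 0.1, while get_window_size_blocks grows
+# the attention window roughly linearly from 128 tokens (1 block) up to 1792
+# tokens (14 blocks). With num_iterations=10000 and cooldown_frac=0.8, the
+# multiplier applied below via group["lr"] = group["initial_lr"] * current_lr_val
+# behaves approximately as:
+#   step  2000 -> 1.00  (x = 0.2, end of the stable phase)
+#   step  6000 -> 0.55  (w = 0.5, halfway through the cooldown)
+#   step 10000 -> 0.10  (w = 0.0, fully cooled down)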
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:43:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
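+# The run directory name encodes the experiment configuration as
+# mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed}
+# (for this run: mode_5_param_qkvo_lr_0.002_seed_45); the config.json written a
+# few lines below snapshots the CLI args together with the Hyperparameters class
+# attributes, which is what produced the config.json shown earlier in this diff.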
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
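# Illustrative check (not part of the original script): with num_iterations=10000 and
# cooldown_frac=0.8, the get_lr schedule defined above keeps the learning-rate multiplier at
# 1.0 for roughly the first 2000 steps and then decays it linearly to 0.1 by the final step.
# A standalone restatement for a quick numeric check:
def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:           # first (1 - cooldown_frac) fraction of training: constant
        return 1.0
    w = (1 - x) / cooldown_frac         # linear cooldown over the remaining steps
    return w * 1.0 + (1 - w) * 0.1
# _lr_multiplier_sketch(0) == 1.0; the value stays ~1.0 through step 2000,
# falls to ~0.55 at step 6000, and reaches 0.1 at step 10000.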
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:43:55] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:43:55] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:43:55] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:43:55] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:43:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:43:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:43:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:43:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:43:57] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:43:57] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:43:58] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:43:58] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:43:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:43:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:43:58] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:43:58] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:43:58] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:43:58] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:43:59] [Rank 0] PRINT: Model returns: +[2025-07-05 09:43:59] [Rank 0] PRINT: Model returns: +[2025-07-05 09:43:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:43:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:43:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:43:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:43:59] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:43:59] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:43:59] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:43:59] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:43:59] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:43:59] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:43:59] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:43:59] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:43:59] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:43:59] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:43:59] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:43:59] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:45:02] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:45:02] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:45:02] [Rank 0] PRINT: Starting training... +[2025-07-05 09:45:02] [Rank 0] PRINT: Starting training... +[2025-07-05 09:45:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:45:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
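For runs where Muon is active (the mode-5 run logged here sends all matrices to Adam, so it does not apply), the training loop earlier in this file ramps Muon's momentum linearly over the first 300 steps. A standalone restatement of that ramp, assuming the same 0.85 to 0.95 endpoints used in the code:

def muon_momentum_at(step: int) -> float:
    # linear warmup of Muon momentum over the first 300 steps, then held at 0.95
    frac = min(step / 300, 1.0)
    return (1 - frac) * 0.85 + frac * 0.95

# muon_momentum_at(0) -> 0.85, muon_momentum_at(150) -> 0.90, muon_momentum_at(300) -> 0.95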
+[2025-07-05 09:45:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:45:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:45:11] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.40ms +[2025-07-05 09:45:11] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.40ms +[2025-07-05 09:45:12] [Rank 0] step:41/10000 train_time:2332ms step_avg:56.87ms +[2025-07-05 09:45:12] [Rank 0] step:41/10000 train_time:2332ms step_avg:56.87ms +[2025-07-05 09:45:14] [Rank 0] step:61/10000 train_time:3650ms step_avg:59.84ms +[2025-07-05 09:45:14] [Rank 0] step:61/10000 train_time:3650ms step_avg:59.84ms +[2025-07-05 09:45:15] [Rank 0] step:81/10000 train_time:4970ms step_avg:61.36ms +[2025-07-05 09:45:15] [Rank 0] step:81/10000 train_time:4970ms step_avg:61.36ms +[2025-07-05 09:45:16] [Rank 0] step:101/10000 train_time:6292ms step_avg:62.29ms +[2025-07-05 09:45:16] [Rank 0] step:101/10000 train_time:6292ms step_avg:62.29ms +[2025-07-05 09:45:18] [Rank 0] step:121/10000 train_time:7615ms step_avg:62.93ms +[2025-07-05 09:45:18] [Rank 0] step:121/10000 train_time:7615ms step_avg:62.93ms +[2025-07-05 09:45:19] [Rank 0] step:141/10000 train_time:8940ms step_avg:63.40ms +[2025-07-05 09:45:19] [Rank 0] step:141/10000 train_time:8940ms step_avg:63.40ms +[2025-07-05 09:45:20] [Rank 0] step:161/10000 train_time:10268ms step_avg:63.77ms +[2025-07-05 09:45:20] [Rank 0] step:161/10000 train_time:10268ms step_avg:63.77ms +[2025-07-05 09:45:22] [Rank 0] step:181/10000 train_time:11640ms step_avg:64.31ms +[2025-07-05 09:45:22] [Rank 0] step:181/10000 train_time:11640ms step_avg:64.31ms +[2025-07-05 09:45:23] [Rank 0] step:201/10000 train_time:12968ms step_avg:64.52ms +[2025-07-05 09:45:23] [Rank 0] step:201/10000 train_time:12968ms step_avg:64.52ms +[2025-07-05 09:45:24] [Rank 0] step:221/10000 train_time:14296ms step_avg:64.69ms +[2025-07-05 09:45:24] [Rank 0] step:221/10000 train_time:14296ms step_avg:64.69ms +[2025-07-05 09:45:26] [Rank 0] step:241/10000 train_time:15625ms step_avg:64.83ms +[2025-07-05 09:45:26] [Rank 0] step:241/10000 train_time:15625ms step_avg:64.83ms +[2025-07-05 09:45:27] [Rank 0] step:261/10000 train_time:16956ms step_avg:64.96ms +[2025-07-05 09:45:27] [Rank 0] step:261/10000 train_time:16956ms step_avg:64.96ms +[2025-07-05 09:45:28] [Rank 0] step:281/10000 train_time:18287ms step_avg:65.08ms +[2025-07-05 09:45:28] [Rank 0] step:281/10000 train_time:18287ms step_avg:65.08ms +[2025-07-05 09:45:30] [Rank 0] step:301/10000 train_time:19618ms step_avg:65.18ms +[2025-07-05 09:45:30] [Rank 0] step:301/10000 train_time:19618ms step_avg:65.18ms +[2025-07-05 09:45:31] [Rank 0] step:321/10000 train_time:20949ms step_avg:65.26ms +[2025-07-05 09:45:31] [Rank 0] step:321/10000 train_time:20949ms step_avg:65.26ms +[2025-07-05 09:45:32] [Rank 0] step:341/10000 train_time:22279ms step_avg:65.33ms +[2025-07-05 09:45:32] [Rank 0] step:341/10000 train_time:22279ms step_avg:65.33ms +[2025-07-05 09:45:34] [Rank 0] step:361/10000 train_time:23713ms step_avg:65.69ms +[2025-07-05 09:45:34] [Rank 0] step:361/10000 train_time:23713ms step_avg:65.69ms +[2025-07-05 09:45:35] [Rank 0] step:381/10000 train_time:25098ms step_avg:65.87ms +[2025-07-05 09:45:35] [Rank 0] step:381/10000 train_time:25098ms step_avg:65.87ms +[2025-07-05 09:45:37] [Rank 0] step:401/10000 train_time:26429ms step_avg:65.91ms +[2025-07-05 09:45:37] [Rank 0] step:401/10000 train_time:26429ms step_avg:65.91ms +[2025-07-05 09:45:38] [Rank 0] step:421/10000 train_time:27762ms step_avg:65.94ms 
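The step_avg figures in these progress lines are the cumulative train_time divided by the step count shown (see the logging call near the end of the training loop earlier in this file). A quick spot check against the logged values, allowing for rounding of the displayed train_time:

for step, train_time_ms in [(61, 3650), (81, 4970), (481, 31760)]:
    print(f"step:{step} step_avg ~= {train_time_ms / step:.2f}ms")  # 59.84ms, 61.36ms, 66.03ms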
+[2025-07-05 09:45:38] [Rank 0] step:421/10000 train_time:27762ms step_avg:65.94ms +[2025-07-05 09:45:39] [Rank 0] step:441/10000 train_time:29094ms step_avg:65.97ms +[2025-07-05 09:45:39] [Rank 0] step:441/10000 train_time:29094ms step_avg:65.97ms +[2025-07-05 09:45:41] [Rank 0] step:461/10000 train_time:30426ms step_avg:66.00ms +[2025-07-05 09:45:41] [Rank 0] step:461/10000 train_time:30426ms step_avg:66.00ms +[2025-07-05 09:45:42] [Rank 0] step:481/10000 train_time:31760ms step_avg:66.03ms +[2025-07-05 09:45:42] [Rank 0] step:481/10000 train_time:31760ms step_avg:66.03ms +[2025-07-05 09:45:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:45:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:45:44] [Rank 0] PRINT: step:500/10000 train_loss:3.8858 val_loss:1.8875 train_time:33700ms step_avg:67.40ms +[2025-07-05 09:45:44] [Rank 0] PRINT: step:500/10000 train_loss:3.8858 val_loss:1.8875 train_time:33700ms step_avg:67.40ms +[2025-07-05 09:45:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:45:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2c6819e6ea84f0e8828182b2493b3c3ad88a5f95 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "2b64ff55-6f45-4846-bd89-f6715bba4bac", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46/training_log_2b64ff55-6f45-4846-bd89-f6715bba4bac.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46/training_log_2b64ff55-6f45-4846-bd89-f6715bba4bac.txt new file mode 100644 index 0000000000000000000000000000000000000000..3c00b65955f3875cf0673821a39e857eeef10806 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46/training_log_2b64ff55-6f45-4846-bd89-f6715bba4bac.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:11:13] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:11:13 2025 --- +[2025-07-05 10:11:13] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:11:13 2025 --- +[2025-07-05 10:11:14] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 10:11:14] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 10:11:14] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:11:14] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:11:14] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:11:14] 
[Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:11:14] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46 +[2025-07-05 10:11:14] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_46 +[2025-07-05 10:11:14] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:11:14] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
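+# (editor's note) Running accumulators for the average training loss reported at each validation
+# step: every optimizer step adds loss_train / train_seq_len (this scaling presumes the model
+# returns a token-summed rather than token-averaged loss), and the accumulated sum is divided by
+# train_step_count and then reset at the end of each validation block.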
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:11:14] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:11:14] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:11:14] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:11:14] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:11:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:11:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:11:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:11:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:11:16] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:11:16] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:11:17] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:11:17] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:11:17] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:11:17] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:11:17] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:11:17] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:11:17] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:11:17] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:11:17] [Rank 0] PRINT: Model returns: +[2025-07-05 10:11:17] [Rank 0] PRINT: Model returns: +[2025-07-05 10:11:17] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:11:17] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:11:17] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:11:17] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:11:17] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 10:11:17] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 10:11:17] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:11:17] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:11:17] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:11:17] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:11:17] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:11:17] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:11:17] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:11:17] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:11:17] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:11:17] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:12:20] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:12:20] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:12:20] [Rank 0] PRINT: Starting training... +[2025-07-05 10:12:20] [Rank 0] PRINT: Starting training... +[2025-07-05 10:12:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:12:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:12:27] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:12:27] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:12:29] [Rank 0] step:21/10000 train_time:813ms step_avg:38.70ms +[2025-07-05 10:12:29] [Rank 0] step:21/10000 train_time:813ms step_avg:38.70ms +[2025-07-05 10:12:30] [Rank 0] step:41/10000 train_time:2130ms step_avg:51.96ms +[2025-07-05 10:12:30] [Rank 0] step:41/10000 train_time:2130ms step_avg:51.96ms +[2025-07-05 10:12:31] [Rank 0] step:61/10000 train_time:3450ms step_avg:56.56ms +[2025-07-05 10:12:31] [Rank 0] step:61/10000 train_time:3450ms step_avg:56.56ms +[2025-07-05 10:12:33] [Rank 0] step:81/10000 train_time:4873ms step_avg:60.16ms +[2025-07-05 10:12:33] [Rank 0] step:81/10000 train_time:4873ms step_avg:60.16ms +[2025-07-05 10:12:34] [Rank 0] step:101/10000 train_time:6196ms step_avg:61.35ms +[2025-07-05 10:12:34] [Rank 0] step:101/10000 train_time:6196ms step_avg:61.35ms +[2025-07-05 10:12:35] [Rank 0] step:121/10000 train_time:7522ms step_avg:62.17ms +[2025-07-05 10:12:35] [Rank 0] step:121/10000 train_time:7522ms step_avg:62.17ms +[2025-07-05 10:12:37] [Rank 0] step:141/10000 train_time:8849ms step_avg:62.76ms +[2025-07-05 10:12:37] [Rank 0] step:141/10000 train_time:8849ms step_avg:62.76ms +[2025-07-05 10:12:38] [Rank 0] step:161/10000 train_time:10177ms step_avg:63.21ms +[2025-07-05 10:12:38] [Rank 0] step:161/10000 train_time:10177ms step_avg:63.21ms +[2025-07-05 10:12:39] [Rank 0] step:181/10000 train_time:12165ms step_avg:67.21ms +[2025-07-05 10:12:39] [Rank 0] step:181/10000 train_time:12165ms step_avg:67.21ms +[2025-07-05 10:12:41] [Rank 0] step:201/10000 train_time:12882ms step_avg:64.09ms +[2025-07-05 10:12:41] [Rank 0] step:201/10000 train_time:12882ms step_avg:64.09ms +[2025-07-05 10:12:42] [Rank 0] step:221/10000 train_time:14213ms step_avg:64.31ms +[2025-07-05 10:12:42] [Rank 0] step:221/10000 train_time:14213ms step_avg:64.31ms +[2025-07-05 10:12:43] [Rank 0] step:241/10000 train_time:15541ms step_avg:64.49ms +[2025-07-05 10:12:43] [Rank 0] step:241/10000 train_time:15541ms step_avg:64.49ms +[2025-07-05 10:12:45] [Rank 0] step:261/10000 train_time:16872ms step_avg:64.64ms +[2025-07-05 10:12:45] [Rank 0] step:261/10000 train_time:16872ms step_avg:64.64ms +[2025-07-05 10:12:46] [Rank 0] step:281/10000 train_time:18203ms step_avg:64.78ms +[2025-07-05 10:12:46] [Rank 0] step:281/10000 train_time:18203ms step_avg:64.78ms +[2025-07-05 10:12:47] [Rank 0] step:301/10000 train_time:19533ms step_avg:64.90ms +[2025-07-05 10:12:47] [Rank 0] step:301/10000 train_time:19533ms step_avg:64.90ms +[2025-07-05 10:12:49] [Rank 0] step:321/10000 train_time:20865ms step_avg:65.00ms +[2025-07-05 10:12:49] [Rank 0] step:321/10000 train_time:20865ms step_avg:65.00ms +[2025-07-05 10:12:50] [Rank 0] step:341/10000 train_time:22197ms step_avg:65.09ms +[2025-07-05 10:12:50] [Rank 0] step:341/10000 train_time:22197ms step_avg:65.09ms +[2025-07-05 10:12:51] [Rank 0] step:361/10000 train_time:23577ms step_avg:65.31ms +[2025-07-05 10:12:51] [Rank 0] step:361/10000 train_time:23577ms step_avg:65.31ms +[2025-07-05 10:12:53] [Rank 0] step:381/10000 train_time:24914ms step_avg:65.39ms +[2025-07-05 10:12:53] [Rank 0] step:381/10000 train_time:24914ms step_avg:65.39ms +[2025-07-05 10:12:54] [Rank 0] step:401/10000 train_time:26247ms step_avg:65.45ms +[2025-07-05 10:12:54] [Rank 0] step:401/10000 train_time:26247ms step_avg:65.45ms +[2025-07-05 10:12:55] [Rank 0] step:421/10000 train_time:27581ms step_avg:65.51ms 
+[2025-07-05 10:12:55] [Rank 0] step:421/10000 train_time:27581ms step_avg:65.51ms +[2025-07-05 10:12:57] [Rank 0] step:441/10000 train_time:28913ms step_avg:65.56ms +[2025-07-05 10:12:57] [Rank 0] step:441/10000 train_time:28913ms step_avg:65.56ms +[2025-07-05 10:12:58] [Rank 0] step:461/10000 train_time:30248ms step_avg:65.61ms +[2025-07-05 10:12:58] [Rank 0] step:461/10000 train_time:30248ms step_avg:65.61ms +[2025-07-05 10:12:59] [Rank 0] step:481/10000 train_time:31583ms step_avg:65.66ms +[2025-07-05 10:12:59] [Rank 0] step:481/10000 train_time:31583ms step_avg:65.66ms +[2025-07-05 10:13:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:13:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:13:02] [Rank 0] PRINT: step:500/10000 train_loss:3.9189 val_loss:1.8988 train_time:33525ms step_avg:67.05ms +[2025-07-05 10:13:02] [Rank 0] PRINT: step:500/10000 train_loss:3.9189 val_loss:1.8988 train_time:33525ms step_avg:67.05ms +[2025-07-05 10:13:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:13:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..07ab0dbaa92d8b76a80e95896a01bb25240feac8 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "ffa51317-e6ad-41f5-94f1-c2de0c3a5a98", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47/training_log_ffa51317-e6ad-41f5-94f1-c2de0c3a5a98.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47/training_log_ffa51317-e6ad-41f5-94f1-c2de0c3a5a98.txt new file mode 100644 index 0000000000000000000000000000000000000000..b4066d372123363daa526ecadd609aff0a545e48 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47/training_log_ffa51317-e6ad-41f5-94f1-c2de0c3a5a98.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:30:34] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:30:34 2025 --- +[2025-07-05 08:30:34] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:30:34 2025 --- +[2025-07-05 08:30:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:30:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:30:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:30:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:30:34] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:30:34] 
[Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:30:34] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47 +[2025-07-05 08:30:34] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_47 +[2025-07-05 08:30:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
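+# --- Editorial sketch (not part of the logged script): a worked example of the
+# stable-then-decay multiplier computed by get_lr above, assuming the logged
+# hyperparameters num_iterations=10000 and cooldown_frac=0.8. The multiplier
+# stays at 1.0 for the first 20% of training, then decays linearly to 0.1:
+#   get_lr(0)     -> 1.00   # x = 0.00, constant phase
+#   get_lr(2000)  -> 1.00   # x = 0.20, cooldown boundary (w = 1.0)
+#   get_lr(6000)  -> 0.55   # x = 0.60, w = 0.5 -> 0.5*1.0 + 0.5*0.1
+#   get_lr(10000) -> 0.10   # x = 1.00, w = 0.0
+# In the training loop below, each optimizer group's lr is set to
+# group["initial_lr"] * get_lr(step).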
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:30:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
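+# --- Editorial note (not part of the logged script): with the CLI args recorded
+# at the top of this log (optimizer_mode=5, model_parameterization='qkvo',
+# adam_lr=0.002, seed=47), the f-string above resolves to a run folder named
+# "mode_5_param_qkvo_lr_0.002_seed_47" under logs_bios/qa_0704, matching the
+# "Run directory" line printed at the start of this log.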
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
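+# Hypothetical sanity check (added for illustration only; not part of the logged run): with the
+# configured num_iterations = 10000 and cooldown_frac = 0.8, the get_lr schedule defined above
+# holds the multiplier at 1.0 for the first 20% of steps, then decays linearly to 0.1 of the base LR.
+assert abs(get_lr(0) - 1.0) < 1e-9       # stable phase: full base LR
+assert abs(get_lr(2000) - 1.0) < 1e-9    # x = 0.2 is the boundary of the stable phase
+assert abs(get_lr(6000) - 0.55) < 1e-9   # halfway through cooldown: 0.5*1.0 + 0.5*0.1
+assert abs(get_lr(10000) - 0.1) < 1e-9   # final step bottoms out at 10% of the base LR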
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:30:34] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:30:34] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:30:34] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:30:34] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:30:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:30:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:30:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:30:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:30:36] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:30:36] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:30:37] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:30:37] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:30:37] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:30:37] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:30:37] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:30:37] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:30:37] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:30:37] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:30:37] [Rank 0] PRINT: Model returns: +[2025-07-05 08:30:37] [Rank 0] PRINT: Model returns: +[2025-07-05 08:30:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:30:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:30:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:30:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:30:37] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 08:30:37] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 08:30:37] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:30:37] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:30:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:30:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:30:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:30:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:30:37] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:30:37] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:30:37] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:30:37] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:31:40] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:31:40] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:31:41] [Rank 0] PRINT: Starting training... +[2025-07-05 08:31:41] [Rank 0] PRINT: Starting training... +[2025-07-05 08:31:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:31:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
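+The divisibility warning above follows from the configured sizes: the validation loop runs
+val_tokens // val_batch_size full batches, and the remainder is never evaluated (hence the
+warning). A minimal arithmetic check, where world_size = 4 is inferred from
+262144 = 4 * 65536 rather than printed directly in the log:
+    val_tokens, val_seq_len, world_size = 1966080, 65536, 4
+    val_batch_size = world_size * val_seq_len            # 262144
+    val_num_steps = val_tokens // val_batch_size         # 7 full validation batches
+    print(val_tokens - val_num_steps * val_batch_size)   # 131072 tokens skipped per eval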
+[2025-07-05 08:31:48] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:31:48] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:31:49] [Rank 0] step:21/10000 train_time:1005ms step_avg:47.86ms +[2025-07-05 08:31:49] [Rank 0] step:21/10000 train_time:1005ms step_avg:47.86ms +[2025-07-05 08:31:51] [Rank 0] step:41/10000 train_time:2318ms step_avg:56.54ms +[2025-07-05 08:31:51] [Rank 0] step:41/10000 train_time:2318ms step_avg:56.54ms +[2025-07-05 08:31:52] [Rank 0] step:61/10000 train_time:3634ms step_avg:59.57ms +[2025-07-05 08:31:52] [Rank 0] step:61/10000 train_time:3634ms step_avg:59.57ms +[2025-07-05 08:31:53] [Rank 0] step:81/10000 train_time:4951ms step_avg:61.12ms +[2025-07-05 08:31:53] [Rank 0] step:81/10000 train_time:4951ms step_avg:61.12ms +[2025-07-05 08:31:55] [Rank 0] step:101/10000 train_time:6269ms step_avg:62.07ms +[2025-07-05 08:31:55] [Rank 0] step:101/10000 train_time:6269ms step_avg:62.07ms +[2025-07-05 08:31:56] [Rank 0] step:121/10000 train_time:7592ms step_avg:62.75ms +[2025-07-05 08:31:56] [Rank 0] step:121/10000 train_time:7592ms step_avg:62.75ms +[2025-07-05 08:31:57] [Rank 0] step:141/10000 train_time:8917ms step_avg:63.24ms +[2025-07-05 08:31:57] [Rank 0] step:141/10000 train_time:8917ms step_avg:63.24ms +[2025-07-05 08:31:59] [Rank 0] step:161/10000 train_time:10244ms step_avg:63.63ms +[2025-07-05 08:31:59] [Rank 0] step:161/10000 train_time:10244ms step_avg:63.63ms +[2025-07-05 08:32:00] [Rank 0] step:181/10000 train_time:11618ms step_avg:64.19ms +[2025-07-05 08:32:00] [Rank 0] step:181/10000 train_time:11618ms step_avg:64.19ms +[2025-07-05 08:32:01] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-05 08:32:01] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-05 08:32:03] [Rank 0] step:221/10000 train_time:14294ms step_avg:64.68ms +[2025-07-05 08:32:03] [Rank 0] step:221/10000 train_time:14294ms step_avg:64.68ms +[2025-07-05 08:32:04] [Rank 0] step:241/10000 train_time:15622ms step_avg:64.82ms +[2025-07-05 08:32:04] [Rank 0] step:241/10000 train_time:15622ms step_avg:64.82ms +[2025-07-05 08:32:05] [Rank 0] step:261/10000 train_time:16951ms step_avg:64.95ms +[2025-07-05 08:32:05] [Rank 0] step:261/10000 train_time:16951ms step_avg:64.95ms +[2025-07-05 08:32:07] [Rank 0] step:281/10000 train_time:18280ms step_avg:65.05ms +[2025-07-05 08:32:07] [Rank 0] step:281/10000 train_time:18280ms step_avg:65.05ms +[2025-07-05 08:32:08] [Rank 0] step:301/10000 train_time:19610ms step_avg:65.15ms +[2025-07-05 08:32:08] [Rank 0] step:301/10000 train_time:19610ms step_avg:65.15ms +[2025-07-05 08:32:09] [Rank 0] step:321/10000 train_time:20940ms step_avg:65.23ms +[2025-07-05 08:32:09] [Rank 0] step:321/10000 train_time:20940ms step_avg:65.23ms +[2025-07-05 08:32:11] [Rank 0] step:341/10000 train_time:22271ms step_avg:65.31ms +[2025-07-05 08:32:11] [Rank 0] step:341/10000 train_time:22271ms step_avg:65.31ms +[2025-07-05 08:32:12] [Rank 0] step:361/10000 train_time:23650ms step_avg:65.51ms +[2025-07-05 08:32:12] [Rank 0] step:361/10000 train_time:23650ms step_avg:65.51ms +[2025-07-05 08:32:13] [Rank 0] step:381/10000 train_time:25000ms step_avg:65.62ms +[2025-07-05 08:32:13] [Rank 0] step:381/10000 train_time:25000ms step_avg:65.62ms +[2025-07-05 08:32:15] [Rank 0] step:401/10000 train_time:26331ms step_avg:65.66ms +[2025-07-05 08:32:15] [Rank 0] step:401/10000 train_time:26331ms step_avg:65.66ms +[2025-07-05 08:32:16] [Rank 0] step:421/10000 train_time:27662ms step_avg:65.71ms 
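+A rough throughput figure follows from the step timings above, again assuming the 4-rank setup
+inferred from the validation batch size: each training step consumes world_size * train_seq_len
+tokens, so the logged ~65.7 ms/step average corresponds to roughly 750K tokens per second.
+    tokens_per_step = 4 * 12288          # world_size * train_seq_len = 49152
+    print(tokens_per_step / 0.0657)      # ~748,000 tokens per second at the logged step_avg
+    print(tokens_per_step * 10000)       # 491,520,000 tokens over the full 10000-step run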
+[2025-07-05 08:32:16] [Rank 0] step:421/10000 train_time:27662ms step_avg:65.71ms +[2025-07-05 08:32:17] [Rank 0] step:441/10000 train_time:28994ms step_avg:65.75ms +[2025-07-05 08:32:17] [Rank 0] step:441/10000 train_time:28994ms step_avg:65.75ms +[2025-07-05 08:32:19] [Rank 0] step:461/10000 train_time:30325ms step_avg:65.78ms +[2025-07-05 08:32:19] [Rank 0] step:461/10000 train_time:30325ms step_avg:65.78ms +[2025-07-05 08:32:20] [Rank 0] step:481/10000 train_time:31657ms step_avg:65.82ms +[2025-07-05 08:32:20] [Rank 0] step:481/10000 train_time:31657ms step_avg:65.82ms +[2025-07-05 08:32:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:32:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:32:22] [Rank 0] PRINT: step:500/10000 train_loss:3.9607 val_loss:1.9317 train_time:33695ms step_avg:67.39ms +[2025-07-05 08:32:22] [Rank 0] PRINT: step:500/10000 train_loss:3.9607 val_loss:1.9317 train_time:33695ms step_avg:67.39ms +[2025-07-05 08:32:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:32:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e7360c552d22c79e7df43cee78e59d6ff6e80d70 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "c280d566-d140-4198-99ce-7a75b43762e7", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/training_log_c280d566-d140-4198-99ce-7a75b43762e7.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/training_log_c280d566-d140-4198-99ce-7a75b43762e7.txt new file mode 100644 index 0000000000000000000000000000000000000000..85c99bfc248aa8dd278bf9518e29f0e4ba1250a6 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/training_log_c280d566-d140-4198-99ce-7a75b43762e7.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:57:59] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:57:59 2025 --- +[2025-07-05 08:57:59] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:57:59 2025 --- +[2025-07-05 08:57:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:57:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 08:57:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:57:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:57:59] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:57:59] 
[Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:57:59] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48 +[2025-07-05 08:57:59] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48 +[2025-07-05 08:57:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
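+# NOTE (added annotation, not original log output): illustrative schedule values for the
+# training loop below, assuming the Hyperparameters defined above (num_iterations=10000,
+# cooldown_frac=0.8):
+#   get_lr(step) returns 1.0 for steps 0..1999 (the stable phase), then decays linearly
+#   toward 0.1, e.g. get_lr(2000)=1.0, get_lr(6000)=0.55, get_lr(10000)=0.1; each
+#   optimizer group is stepped with lr = initial_lr * get_lr(step).
+#   get_window_size_blocks(step) grows the attention window from 128 tokens (1 block) at
+#   step 0 to 1792 tokens (14 blocks, the next multiple of 128 above 1728) at the final step.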
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:57:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
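+# NOTE (added annotation, not original log output): as a worked example, with the argparse
+# defaults above (optimizer_mode=0, model_parameterization="whole", adam_lr=1e-3, seed=42)
+# the f-string above would resolve to a run directory ending in
+# "mode_0_param_whole_lr_0.001_seed_42"; the actual run substitutes the parsed CLI values.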
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
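+# Note on the schedule above: with the hyperparameters used here
+# (num_iterations=10000, cooldown_frac=0.8), get_lr keeps the multiplier at 1.0
+# for roughly the first 2000 steps and then decays it linearly toward 0.1, e.g.:
+#   step  2000 -> x = 0.2, w = 1.0, multiplier = 1.00
+#   step  6000 -> x = 0.6, w = 0.5, multiplier = 0.55
+#   step 10000 -> x = 1.0, w = 0.0, multiplier = 0.10
+# In the training loop below, each optimizer group's lr is set to
+# group["initial_lr"] times this multiplier.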
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:57:59] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:57:59] [Rank 0] PRINT: Constructing model...
+[2025-07-05 08:58:01] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 08:58:01] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 08:58:01] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 08:58:02] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 08:58:02] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 08:58:02] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 08:58:02] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 08:58:02] [Rank 0] PRINT: Model returns: 
+[2025-07-05 08:58:02] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 08:58:02] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 08:58:02] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002).
+[2025-07-05 08:58:02] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 08:58:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 08:58:02] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 08:58:02] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 08:58:02] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 08:59:07] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 08:59:07] [Rank 0] PRINT: Starting training...
+[2025-07-05 08:59:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 08:59:14] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 08:59:16] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.36ms
+[2025-07-05 08:59:17] [Rank 0] step:41/10000 train_time:2333ms step_avg:56.91ms
+[2025-07-05 08:59:18] [Rank 0] step:61/10000 train_time:3654ms step_avg:59.91ms
+[2025-07-05 08:59:20] [Rank 0] step:81/10000 train_time:4975ms step_avg:61.42ms
+[2025-07-05 08:59:21] [Rank 0] step:101/10000 train_time:6298ms step_avg:62.35ms
+[2025-07-05 08:59:22] [Rank 0] step:121/10000 train_time:7623ms step_avg:63.00ms
+[2025-07-05 08:59:24] [Rank 0] step:141/10000 train_time:8950ms step_avg:63.47ms
+[2025-07-05 08:59:25] [Rank 0] step:161/10000 train_time:10279ms step_avg:63.84ms
+[2025-07-05 08:59:26] [Rank 0] step:181/10000 train_time:11609ms step_avg:64.14ms
+[2025-07-05 08:59:28] [Rank 0] step:201/10000 train_time:13013ms step_avg:64.74ms
+[2025-07-05 08:59:29] [Rank 0] step:221/10000 train_time:14344ms step_avg:64.91ms
+[2025-07-05 08:59:30] [Rank 0] step:241/10000 train_time:15675ms step_avg:65.04ms
+[2025-07-05 08:59:32] [Rank 0] step:261/10000 train_time:17009ms step_avg:65.17ms
+[2025-07-05 08:59:33] [Rank 0] step:281/10000 train_time:18342ms step_avg:65.27ms
+[2025-07-05 08:59:34] [Rank 0] step:301/10000 train_time:19675ms step_avg:65.37ms
+[2025-07-05 08:59:36] [Rank 0] step:321/10000 train_time:21007ms step_avg:65.44ms
+[2025-07-05 08:59:37] [Rank 0] step:341/10000 train_time:22340ms step_avg:65.51ms
+[2025-07-05 08:59:38] [Rank 0] step:361/10000 train_time:23675ms step_avg:65.58ms
+[2025-07-05 08:59:40] [Rank 0] step:381/10000 train_time:25077ms step_avg:65.82ms
+[2025-07-05 08:59:41] [Rank 0] step:401/10000 train_time:26414ms step_avg:65.87ms
+[2025-07-05 08:59:42] [Rank 0] step:421/10000 train_time:27748ms step_avg:65.91ms +[2025-07-05 08:59:44] [Rank 0] step:441/10000 train_time:29082ms step_avg:65.95ms +[2025-07-05 08:59:44] [Rank 0] step:441/10000 train_time:29082ms step_avg:65.95ms +[2025-07-05 08:59:45] [Rank 0] step:461/10000 train_time:30418ms step_avg:65.98ms +[2025-07-05 08:59:45] [Rank 0] step:461/10000 train_time:30418ms step_avg:65.98ms +[2025-07-05 08:59:46] [Rank 0] step:481/10000 train_time:31754ms step_avg:66.02ms +[2025-07-05 08:59:46] [Rank 0] step:481/10000 train_time:31754ms step_avg:66.02ms +[2025-07-05 08:59:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:59:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:59:49] [Rank 0] PRINT: step:500/10000 train_loss:3.8612 val_loss:1.8857 train_time:33697ms step_avg:67.39ms +[2025-07-05 08:59:49] [Rank 0] PRINT: step:500/10000 train_loss:3.8612 val_loss:1.8857 train_time:33697ms step_avg:67.39ms +[2025-07-05 08:59:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:59:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b035a4724ed6ce293e594756e1e3a459f749c566 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "93daf1f4-a854-45cf-b370-3a817bfa5dc5", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49/training_log_93daf1f4-a854-45cf-b370-3a817bfa5dc5.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49/training_log_93daf1f4-a854-45cf-b370-3a817bfa5dc5.txt new file mode 100644 index 0000000000000000000000000000000000000000..9e95ffe89d0dcc06a350020a7d2bcc4d3e036f4d --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49/training_log_93daf1f4-a854-45cf-b370-3a817bfa5dc5.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:25:30] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:25:30 2025 --- +[2025-07-05 09:25:30] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:25:30 2025 --- +[2025-07-05 09:25:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:25:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:25:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:25:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:25:30] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:25:30] 
[Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:25:30] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49 +[2025-07-05 09:25:30] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_49 +[2025-07-05 09:25:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
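+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original training logic): the loop below
+# scales every optimizer group as group["lr"] = group["initial_lr"] * get_lr(step).
+# Assuming the Hyperparameters above (num_iterations=10000, cooldown_frac=0.8),
+# the stable-then-decay multiplier works out to:
+#   step     0 -> 1.00   (stable phase, x = 0.0 < 1 - cooldown_frac = 0.2)
+#   step  2000 -> 1.00   (end of the stable phase, w = 0.8/0.8 = 1.0)
+#   step  6000 -> 0.55   (x = 0.6, w = 0.4/0.8 = 0.5, 0.5*1.0 + 0.5*0.1)
+#   step 10000 -> 0.10   (end of the linear cooldown)
+# Muon momentum is likewise warmed up in the loop from 0.85 to 0.95 over the
+# first 300 steps via frac = min(step/300, 1).
+# The guard and env var below are hypothetical debugging aids, not used elsewhere:
+if master_process and os.environ.get("LR_SCHED_DEBUG") == "1":
+    for _s in (0, 2000, 6000, 10000):
+        print0(f"PRINT: get_lr({_s}) = {get_lr(_s):.2f}", console=True)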
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:25:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
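+# For example, with the parser defaults above (optimizer_mode=0,
+# model_parameterization="whole", adam_lr=0.001, seed=42) this pattern resolves
+# to ".../logs_bios/qa_0704/mode_0_param_whole_lr_0.001_seed_42"; the master
+# process rebuilds the same folder name under the relative base_log_dir below.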
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
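+# Together with train_step_count (declared just below), this accumulator tracks the training
+# loss between validation checkpoints: each training step adds loss_train.detach() / args.train_seq_len,
+# the validation block computes train_loss_sum / train_step_count, all-reduces that mean across
+# ranks, and then resets both tensors to zero, so the logged train_loss is an average over the
+# steps since the previous validation rather than a single-step value.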
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:25:31] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:25:31] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:25:31] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:25:31] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:25:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:25:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:25:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:25:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:25:33] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:25:33] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:25:33] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:25:33] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:25:33] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:25:33] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:25:33] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:25:33] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:25:33] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:25:33] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:25:33] [Rank 0] PRINT: Model returns: +[2025-07-05 09:25:33] [Rank 0] PRINT: Model returns: +[2025-07-05 09:25:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:25:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:25:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:25:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:25:33] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:25:33] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:25:33] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:25:33] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:25:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:25:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:25:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:25:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:25:33] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:25:33] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:25:33] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:25:33] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:26:38] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:26:38] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:26:38] [Rank 0] PRINT: Starting training... +[2025-07-05 09:26:38] [Rank 0] PRINT: Starting training... +[2025-07-05 09:26:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:26:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:26:46] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:26:46] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:26:47] [Rank 0] step:21/10000 train_time:816ms step_avg:38.83ms +[2025-07-05 09:26:47] [Rank 0] step:21/10000 train_time:816ms step_avg:38.83ms +[2025-07-05 09:26:48] [Rank 0] step:41/10000 train_time:2130ms step_avg:51.95ms +[2025-07-05 09:26:48] [Rank 0] step:41/10000 train_time:2130ms step_avg:51.95ms +[2025-07-05 09:26:50] [Rank 0] step:61/10000 train_time:3447ms step_avg:56.51ms +[2025-07-05 09:26:50] [Rank 0] step:61/10000 train_time:3447ms step_avg:56.51ms +[2025-07-05 09:26:51] [Rank 0] step:81/10000 train_time:4766ms step_avg:58.84ms +[2025-07-05 09:26:51] [Rank 0] step:81/10000 train_time:4766ms step_avg:58.84ms +[2025-07-05 09:26:52] [Rank 0] step:101/10000 train_time:6084ms step_avg:60.24ms +[2025-07-05 09:26:52] [Rank 0] step:101/10000 train_time:6084ms step_avg:60.24ms +[2025-07-05 09:26:54] [Rank 0] step:121/10000 train_time:7409ms step_avg:61.23ms +[2025-07-05 09:26:54] [Rank 0] step:121/10000 train_time:7409ms step_avg:61.23ms +[2025-07-05 09:26:55] [Rank 0] step:141/10000 train_time:8735ms step_avg:61.95ms +[2025-07-05 09:26:55] [Rank 0] step:141/10000 train_time:8735ms step_avg:61.95ms +[2025-07-05 09:26:56] [Rank 0] step:161/10000 train_time:10062ms step_avg:62.50ms +[2025-07-05 09:26:56] [Rank 0] step:161/10000 train_time:10062ms step_avg:62.50ms +[2025-07-05 09:26:58] [Rank 0] step:181/10000 train_time:11390ms step_avg:62.93ms +[2025-07-05 09:26:58] [Rank 0] step:181/10000 train_time:11390ms step_avg:62.93ms +[2025-07-05 09:26:59] [Rank 0] step:201/10000 train_time:12779ms step_avg:63.58ms +[2025-07-05 09:26:59] [Rank 0] step:201/10000 train_time:12779ms step_avg:63.58ms +[2025-07-05 09:27:00] [Rank 0] step:221/10000 train_time:14109ms step_avg:63.84ms +[2025-07-05 09:27:00] [Rank 0] step:221/10000 train_time:14109ms step_avg:63.84ms +[2025-07-05 09:27:02] [Rank 0] step:241/10000 train_time:15439ms step_avg:64.06ms +[2025-07-05 09:27:02] [Rank 0] step:241/10000 train_time:15439ms step_avg:64.06ms +[2025-07-05 09:27:03] [Rank 0] step:261/10000 train_time:16773ms step_avg:64.27ms +[2025-07-05 09:27:03] [Rank 0] step:261/10000 train_time:16773ms step_avg:64.27ms +[2025-07-05 09:27:04] [Rank 0] step:281/10000 train_time:18106ms step_avg:64.43ms +[2025-07-05 09:27:04] [Rank 0] step:281/10000 train_time:18106ms step_avg:64.43ms +[2025-07-05 09:27:06] [Rank 0] step:301/10000 train_time:19438ms step_avg:64.58ms +[2025-07-05 09:27:06] [Rank 0] step:301/10000 train_time:19438ms step_avg:64.58ms +[2025-07-05 09:27:07] [Rank 0] step:321/10000 train_time:20771ms step_avg:64.71ms +[2025-07-05 09:27:07] [Rank 0] step:321/10000 train_time:20771ms step_avg:64.71ms +[2025-07-05 09:27:08] [Rank 0] step:341/10000 train_time:22104ms step_avg:64.82ms +[2025-07-05 09:27:08] [Rank 0] step:341/10000 train_time:22104ms step_avg:64.82ms +[2025-07-05 09:27:10] [Rank 0] step:361/10000 train_time:23436ms step_avg:64.92ms +[2025-07-05 09:27:10] [Rank 0] step:361/10000 train_time:23436ms step_avg:64.92ms +[2025-07-05 09:27:11] [Rank 0] step:381/10000 train_time:24814ms step_avg:65.13ms +[2025-07-05 09:27:11] [Rank 0] step:381/10000 train_time:24814ms step_avg:65.13ms +[2025-07-05 09:27:12] [Rank 0] step:401/10000 train_time:26146ms step_avg:65.20ms +[2025-07-05 09:27:12] [Rank 0] step:401/10000 train_time:26146ms step_avg:65.20ms +[2025-07-05 09:27:14] [Rank 0] step:421/10000 train_time:27478ms step_avg:65.27ms 
+[2025-07-05 09:27:14] [Rank 0] step:421/10000 train_time:27478ms step_avg:65.27ms +[2025-07-05 09:27:15] [Rank 0] step:441/10000 train_time:28811ms step_avg:65.33ms +[2025-07-05 09:27:15] [Rank 0] step:441/10000 train_time:28811ms step_avg:65.33ms +[2025-07-05 09:27:16] [Rank 0] step:461/10000 train_time:30143ms step_avg:65.39ms +[2025-07-05 09:27:16] [Rank 0] step:461/10000 train_time:30143ms step_avg:65.39ms +[2025-07-05 09:27:18] [Rank 0] step:481/10000 train_time:31476ms step_avg:65.44ms +[2025-07-05 09:27:18] [Rank 0] step:481/10000 train_time:31476ms step_avg:65.44ms +[2025-07-05 09:27:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:27:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:27:20] [Rank 0] PRINT: step:500/10000 train_loss:4.0617 val_loss:1.9862 train_time:33413ms step_avg:66.83ms +[2025-07-05 09:27:20] [Rank 0] PRINT: step:500/10000 train_loss:4.0617 val_loss:1.9862 train_time:33413ms step_avg:66.83ms +[2025-07-05 09:27:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:27:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f2e29d62277666ec26aceddf89cbc4ad44c0292f --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "00f9517f-61f1-45c7-9d85-c14fa6ecba39", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50/training_log_00f9517f-61f1-45c7-9d85-c14fa6ecba39.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50/training_log_00f9517f-61f1-45c7-9d85-c14fa6ecba39.txt new file mode 100644 index 0000000000000000000000000000000000000000..81040221f4df008657fce4db3904819680ca3203 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50/training_log_00f9517f-61f1-45c7-9d85-c14fa6ecba39.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:52:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:52:57 2025 --- +[2025-07-05 09:52:57] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:52:57 2025 --- +[2025-07-05 09:52:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:52:57] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 09:52:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:52:57] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:52:57] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:52:57] 
[Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:52:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50 +[2025-07-05 09:52:57] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_50 +[2025-07-05 09:52:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
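+# Note on the schedule defined above: with num_iterations=10000 and cooldown_frac=0.8, get_lr(step)
+# returns 1.0 for the first 2000 steps and then decays linearly from 1.0 to 0.1 over the remaining
+# 8000 steps (e.g. get_lr(2000)=1.0, get_lr(6000)=0.55, get_lr(10000)=0.1); each optimizer group's
+# lr is this multiplier times its initial_lr.
+# train_loss_sum and train_step_count accumulate training loss between validation points and are
+# reset after every logging pass.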
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:52:57] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
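+# Note: the absolute run_dir_path_str set above is overridden on the master process below, where the
+# run directory is rebuilt as base_log_dir / "mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed}"
+# and used for config.json, the training log, and the plotted curves.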
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
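+        # Example (hypothetical item, for illustration only): a text like
+        # "What is the birth city of X? Answer: Paris" is cleaned to
+        # "What is the birth city of X? Paris"; the prompt becomes everything up to and including '?',
+        # the answer is "Paris", and expected_token is the first token id of " Paris".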
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
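+# --- Editor's note (illustrative sketch, not part of the original run) ---
+# The training loop below re-evaluates get_lr(step) and get_window_size_blocks(step)
+# on every iteration. With the Hyperparameters above (num_iterations=10000,
+# cooldown_frac=0.8), the LR multiplier stays at 1.0 for the first 2000 steps and then
+# decays linearly toward 0.1, e.g.:
+#     get_lr(1000)  -> 1.00   # x = 0.1 < 1 - cooldown_frac, stable phase
+#     get_lr(6000)  -> 0.55   # w = 0.5, so 0.5 * 1.0 + 0.5 * 0.1
+#     get_lr(10000) -> 0.10   # final step
+# Likewise, get_window_size_blocks() grows the sliding attention window from
+# 128 tokens (1 block) at step 0 to 1792 tokens (14 blocks, the first multiple of
+# 128 that is >= 1728) by the final step.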
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:52:58] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:52:58] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:52:58] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:52:58] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:53:00] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:53:00] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:53:00] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:53:00] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:53:00] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:53:00] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:53:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:53:00] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:53:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:53:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:53:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:53:00] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:53:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:53:00] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:53:01] [Rank 0] PRINT: Model returns: +[2025-07-05 09:53:01] [Rank 0] PRINT: Model returns: +[2025-07-05 09:53:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:53:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:53:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:53:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:53:01] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:53:01] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 09:53:01] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:53:01] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:53:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:53:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:53:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:53:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:53:01] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:53:01] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:53:01] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:53:01] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:54:02] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:54:02] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:54:02] [Rank 0] PRINT: Starting training... +[2025-07-05 09:54:02] [Rank 0] PRINT: Starting training... +[2025-07-05 09:54:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:54:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:54:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:54:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:54:11] [Rank 0] step:21/10000 train_time:1017ms step_avg:48.43ms +[2025-07-05 09:54:11] [Rank 0] step:21/10000 train_time:1017ms step_avg:48.43ms +[2025-07-05 09:54:12] [Rank 0] step:41/10000 train_time:2333ms step_avg:56.90ms +[2025-07-05 09:54:12] [Rank 0] step:41/10000 train_time:2333ms step_avg:56.90ms +[2025-07-05 09:54:14] [Rank 0] step:61/10000 train_time:3652ms step_avg:59.87ms +[2025-07-05 09:54:14] [Rank 0] step:61/10000 train_time:3652ms step_avg:59.87ms +[2025-07-05 09:54:15] [Rank 0] step:81/10000 train_time:4972ms step_avg:61.38ms +[2025-07-05 09:54:15] [Rank 0] step:81/10000 train_time:4972ms step_avg:61.38ms +[2025-07-05 09:54:16] [Rank 0] step:101/10000 train_time:6292ms step_avg:62.29ms +[2025-07-05 09:54:16] [Rank 0] step:101/10000 train_time:6292ms step_avg:62.29ms +[2025-07-05 09:54:18] [Rank 0] step:121/10000 train_time:7615ms step_avg:62.93ms +[2025-07-05 09:54:18] [Rank 0] step:121/10000 train_time:7615ms step_avg:62.93ms +[2025-07-05 09:54:19] [Rank 0] step:141/10000 train_time:8941ms step_avg:63.41ms +[2025-07-05 09:54:19] [Rank 0] step:141/10000 train_time:8941ms step_avg:63.41ms +[2025-07-05 09:54:20] [Rank 0] step:161/10000 train_time:10268ms step_avg:63.77ms +[2025-07-05 09:54:20] [Rank 0] step:161/10000 train_time:10268ms step_avg:63.77ms +[2025-07-05 09:54:22] [Rank 0] step:181/10000 train_time:11596ms step_avg:64.07ms +[2025-07-05 09:54:22] [Rank 0] step:181/10000 train_time:11596ms step_avg:64.07ms +[2025-07-05 09:54:23] [Rank 0] step:201/10000 train_time:12975ms step_avg:64.55ms +[2025-07-05 09:54:23] [Rank 0] step:201/10000 train_time:12975ms step_avg:64.55ms +[2025-07-05 09:54:24] [Rank 0] step:221/10000 train_time:14303ms step_avg:64.72ms +[2025-07-05 09:54:24] [Rank 0] step:221/10000 train_time:14303ms step_avg:64.72ms +[2025-07-05 09:54:26] [Rank 0] step:241/10000 train_time:15632ms step_avg:64.86ms +[2025-07-05 09:54:26] [Rank 0] step:241/10000 train_time:15632ms step_avg:64.86ms +[2025-07-05 09:54:27] [Rank 0] step:261/10000 train_time:16966ms step_avg:65.00ms +[2025-07-05 09:54:27] [Rank 0] step:261/10000 train_time:16966ms step_avg:65.00ms +[2025-07-05 09:54:29] [Rank 0] step:281/10000 train_time:18396ms step_avg:65.47ms +[2025-07-05 09:54:29] [Rank 0] step:281/10000 train_time:18396ms step_avg:65.47ms +[2025-07-05 09:54:30] [Rank 0] step:301/10000 train_time:19727ms step_avg:65.54ms +[2025-07-05 09:54:30] [Rank 0] step:301/10000 train_time:19727ms step_avg:65.54ms +[2025-07-05 09:54:31] [Rank 0] step:321/10000 train_time:21058ms step_avg:65.60ms +[2025-07-05 09:54:31] [Rank 0] step:321/10000 train_time:21058ms step_avg:65.60ms +[2025-07-05 09:54:33] [Rank 0] step:341/10000 train_time:22390ms step_avg:65.66ms +[2025-07-05 09:54:33] [Rank 0] step:341/10000 train_time:22390ms step_avg:65.66ms +[2025-07-05 09:54:34] [Rank 0] step:361/10000 train_time:23722ms step_avg:65.71ms +[2025-07-05 09:54:34] [Rank 0] step:361/10000 train_time:23722ms step_avg:65.71ms +[2025-07-05 09:54:35] [Rank 0] step:381/10000 train_time:25104ms step_avg:65.89ms +[2025-07-05 09:54:35] [Rank 0] step:381/10000 train_time:25104ms step_avg:65.89ms +[2025-07-05 09:54:37] [Rank 0] step:401/10000 train_time:26437ms step_avg:65.93ms +[2025-07-05 09:54:37] [Rank 0] step:401/10000 train_time:26437ms step_avg:65.93ms +[2025-07-05 09:54:38] [Rank 0] step:421/10000 train_time:27770ms step_avg:65.96ms 
+[2025-07-05 09:54:38] [Rank 0] step:421/10000 train_time:27770ms step_avg:65.96ms +[2025-07-05 09:54:39] [Rank 0] step:441/10000 train_time:29103ms step_avg:65.99ms +[2025-07-05 09:54:39] [Rank 0] step:441/10000 train_time:29103ms step_avg:65.99ms +[2025-07-05 09:54:41] [Rank 0] step:461/10000 train_time:30436ms step_avg:66.02ms +[2025-07-05 09:54:41] [Rank 0] step:461/10000 train_time:30436ms step_avg:66.02ms +[2025-07-05 09:54:42] [Rank 0] step:481/10000 train_time:31769ms step_avg:66.05ms +[2025-07-05 09:54:42] [Rank 0] step:481/10000 train_time:31769ms step_avg:66.05ms +[2025-07-05 09:54:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:54:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:54:44] [Rank 0] PRINT: step:500/10000 train_loss:3.9479 val_loss:1.9081 train_time:33707ms step_avg:67.41ms +[2025-07-05 09:54:44] [Rank 0] PRINT: step:500/10000 train_loss:3.9479 val_loss:1.9081 train_time:33707ms step_avg:67.41ms +[2025-07-05 09:54:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:54:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ffecabeb003d75ab4930d9488d815a841b9afcd2 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "20793aae-92dd-4b5e-873a-e3547fe5b53d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51/training_log_20793aae-92dd-4b5e-873a-e3547fe5b53d.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51/training_log_20793aae-92dd-4b5e-873a-e3547fe5b53d.txt new file mode 100644 index 0000000000000000000000000000000000000000..3ef8f386a55624daf4aa97faadefc2e46791714a --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51/training_log_20793aae-92dd-4b5e-873a-e3547fe5b53d.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:20:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:20:20 2025 --- +[2025-07-05 10:20:20] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:20:20 2025 --- +[2025-07-05 10:20:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 10:20:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-05 10:20:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:20:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:20:20] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:20:20] 
[Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:20:20] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51 +[2025-07-05 10:20:20] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_51 +[2025-07-05 10:20:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
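+# ---------------------------------------------------------------------------
+# Editor's note (illustrative sketch, not part of the original run): the
+# get_lr schedule defined above is constant for the first (1 - cooldown_frac)
+# of training, then decays linearly from 1.0 down to 0.1. With the logged
+# hyperparameters num_iterations=10000 and cooldown_frac=0.8 the multiplier
+# works out to, for example:
+#   get_lr(0)     -> 1.00   # x = 0.00, stable phase
+#   get_lr(2000)  -> 1.00   # x = 0.20, cooldown starts, w = 1.0
+#   get_lr(6000)  -> 0.55   # x = 0.60, w = 0.5 -> 0.5*1.0 + 0.5*0.1
+#   get_lr(10000) -> 0.10   # x = 1.00, fully decayed
+# Inside the training loop each optimizer's lr is then set to
+# group["initial_lr"] * get_lr(step).
+# ---------------------------------------------------------------------------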
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:20:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
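+# ---------------------------------------------------------------------------
+# Editor's note (illustrative sketch, not part of the original run): the DDP
+# setup above reads RANK, LOCAL_RANK and WORLD_SIZE from the environment, so
+# the script is meant to be launched with a DDP launcher such as torchrun,
+# which exports those variables for every process it spawns. A hypothetical
+# launch command (script name and GPU count are assumptions, the flags mirror
+# the argparse definitions above) might look like:
+#
+#   torchrun --nproc_per_node=8 train_gpt_bios.py \
+#       --model_parameterization qkvo --optimizer_mode 0 \
+#       --adam_lr 1e-3 --seed 42
+#
+# With a single GPU and no launcher, the defaults RANK=0, LOCAL_RANK=0 and
+# WORLD_SIZE=1 from os.environ.get(...) are used instead.
+# ---------------------------------------------------------------------------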
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
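# ----------------------------------------------------------------------------------
# [Editor's note] Minimal standalone sketch (not part of the original script) of how the
# stable-then-decay schedule implemented by get_lr() above behaves. It assumes the values
# from the Hyperparameters dataclass (num_iterations=10000, cooldown_frac=0.8); the helper
# name _sketch_lr_multiplier is illustrative only.
def _sketch_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)  # clamp training progress to [0, 1]
    if x < 1 - cooldown_frac:
        return 1.0                                 # constant multiplier for the first 20% of steps
    w = (1 - x) / max(cooldown_frac, 1e-9)         # linearly anneal the multiplier from 1.0 to 0.1
    return w * 1.0 + (1 - w) * 0.1

# Example values: step 0 -> 1.0, step 2000 -> 1.0, step 6000 -> 0.55, step 10000 -> 0.1.
# Each optimizer group's lr is then group["initial_lr"] * multiplier, as done in the training loop below.
# ----------------------------------------------------------------------------------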
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:20:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:20:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:20:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:20:21] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:20:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:20:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:20:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:20:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:20:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:20:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:20:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:20:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:20:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:20:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:20:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:20:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:20:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:20:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:20:24] [Rank 0] PRINT: Model returns: +[2025-07-05 10:20:24] [Rank 0] PRINT: Model returns: +[2025-07-05 10:20:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:20:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:20:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:20:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:20:24] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 10:20:24] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-05 10:20:24] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:20:24] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:20:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:20:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:20:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:20:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:20:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:20:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:20:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:20:24] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:21:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:21:27] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:21:27] [Rank 0] PRINT: Starting training... +[2025-07-05 10:21:27] [Rank 0] PRINT: Starting training... +[2025-07-05 10:21:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:21:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:21:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:21:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:21:35] [Rank 0] step:21/10000 train_time:915ms step_avg:43.55ms +[2025-07-05 10:21:35] [Rank 0] step:21/10000 train_time:915ms step_avg:43.55ms +[2025-07-05 10:21:37] [Rank 0] step:41/10000 train_time:2231ms step_avg:54.41ms +[2025-07-05 10:21:37] [Rank 0] step:41/10000 train_time:2231ms step_avg:54.41ms +[2025-07-05 10:21:38] [Rank 0] step:61/10000 train_time:3551ms step_avg:58.21ms +[2025-07-05 10:21:38] [Rank 0] step:61/10000 train_time:3551ms step_avg:58.21ms +[2025-07-05 10:21:39] [Rank 0] step:81/10000 train_time:4869ms step_avg:60.11ms +[2025-07-05 10:21:39] [Rank 0] step:81/10000 train_time:4869ms step_avg:60.11ms +[2025-07-05 10:21:41] [Rank 0] step:101/10000 train_time:6188ms step_avg:61.27ms +[2025-07-05 10:21:41] [Rank 0] step:101/10000 train_time:6188ms step_avg:61.27ms +[2025-07-05 10:21:42] [Rank 0] step:121/10000 train_time:7512ms step_avg:62.08ms +[2025-07-05 10:21:42] [Rank 0] step:121/10000 train_time:7512ms step_avg:62.08ms +[2025-07-05 10:21:43] [Rank 0] step:141/10000 train_time:8839ms step_avg:62.69ms +[2025-07-05 10:21:43] [Rank 0] step:141/10000 train_time:8839ms step_avg:62.69ms +[2025-07-05 10:21:45] [Rank 0] step:161/10000 train_time:10166ms step_avg:63.14ms +[2025-07-05 10:21:45] [Rank 0] step:161/10000 train_time:10166ms step_avg:63.14ms +[2025-07-05 10:21:46] [Rank 0] step:181/10000 train_time:12157ms step_avg:67.17ms +[2025-07-05 10:21:46] [Rank 0] step:181/10000 train_time:12157ms step_avg:67.17ms +[2025-07-05 10:21:47] [Rank 0] step:201/10000 train_time:12871ms step_avg:64.04ms +[2025-07-05 10:21:47] [Rank 0] step:201/10000 train_time:12871ms step_avg:64.04ms +[2025-07-05 10:21:49] [Rank 0] step:221/10000 train_time:14199ms step_avg:64.25ms +[2025-07-05 10:21:49] [Rank 0] step:221/10000 train_time:14199ms step_avg:64.25ms +[2025-07-05 10:21:50] [Rank 0] step:241/10000 train_time:15528ms step_avg:64.43ms +[2025-07-05 10:21:50] [Rank 0] step:241/10000 train_time:15528ms step_avg:64.43ms +[2025-07-05 10:21:51] [Rank 0] step:261/10000 train_time:16859ms step_avg:64.59ms +[2025-07-05 10:21:51] [Rank 0] step:261/10000 train_time:16859ms step_avg:64.59ms +[2025-07-05 10:21:53] [Rank 0] step:281/10000 train_time:18188ms step_avg:64.73ms +[2025-07-05 10:21:53] [Rank 0] step:281/10000 train_time:18188ms step_avg:64.73ms +[2025-07-05 10:21:54] [Rank 0] step:301/10000 train_time:19519ms step_avg:64.85ms +[2025-07-05 10:21:54] [Rank 0] step:301/10000 train_time:19519ms step_avg:64.85ms +[2025-07-05 10:21:55] [Rank 0] step:321/10000 train_time:20850ms step_avg:64.95ms +[2025-07-05 10:21:55] [Rank 0] step:321/10000 train_time:20850ms step_avg:64.95ms +[2025-07-05 10:21:57] [Rank 0] step:341/10000 train_time:22181ms step_avg:65.05ms +[2025-07-05 10:21:57] [Rank 0] step:341/10000 train_time:22181ms step_avg:65.05ms +[2025-07-05 10:21:58] [Rank 0] step:361/10000 train_time:24188ms step_avg:67.00ms +[2025-07-05 10:21:58] [Rank 0] step:361/10000 train_time:24188ms step_avg:67.00ms +[2025-07-05 10:21:59] [Rank 0] step:381/10000 train_time:24906ms step_avg:65.37ms +[2025-07-05 10:21:59] [Rank 0] step:381/10000 train_time:24906ms step_avg:65.37ms +[2025-07-05 10:22:01] [Rank 0] step:401/10000 train_time:26238ms step_avg:65.43ms +[2025-07-05 10:22:01] [Rank 0] step:401/10000 train_time:26238ms step_avg:65.43ms +[2025-07-05 10:22:02] [Rank 0] step:421/10000 train_time:27569ms step_avg:65.48ms 
+[2025-07-05 10:22:02] [Rank 0] step:421/10000 train_time:27569ms step_avg:65.48ms +[2025-07-05 10:22:03] [Rank 0] step:441/10000 train_time:28903ms step_avg:65.54ms +[2025-07-05 10:22:03] [Rank 0] step:441/10000 train_time:28903ms step_avg:65.54ms +[2025-07-05 10:22:05] [Rank 0] step:461/10000 train_time:30236ms step_avg:65.59ms +[2025-07-05 10:22:05] [Rank 0] step:461/10000 train_time:30236ms step_avg:65.59ms +[2025-07-05 10:22:06] [Rank 0] step:481/10000 train_time:31570ms step_avg:65.63ms +[2025-07-05 10:22:06] [Rank 0] step:481/10000 train_time:31570ms step_avg:65.63ms +[2025-07-05 10:22:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:22:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:22:08] [Rank 0] PRINT: step:500/10000 train_loss:3.9686 val_loss:1.9595 train_time:33510ms step_avg:67.02ms +[2025-07-05 10:22:08] [Rank 0] PRINT: step:500/10000 train_loss:3.9686 val_loss:1.9595 train_time:33510ms step_avg:67.02ms +[2025-07-05 10:22:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:22:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b0ae9769d6ae55fafadbe5af687fa503590b74dc --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e6e7db1b-71aa-406b-9310-0552265bb5d4", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_e6e7db1b-71aa-406b-9310-0552265bb5d4.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_e6e7db1b-71aa-406b-9310-0552265bb5d4.txt new file mode 100644 index 0000000000000000000000000000000000000000..1cf3f1168d5c2735e4c09e4b0f5472be41a2e569 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_e6e7db1b-71aa-406b-9310-0552265bb5d4.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:19:51] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:19:51 2025 --- +[2025-07-05 08:19:51] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:19:51 2025 --- +[2025-07-05 08:19:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:19:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:19:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:19:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:19:51] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:19:51] 
[Rank 0] PRINT: Using fixed seed: 42 +[2025-07-05 08:19:51] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42 +[2025-07-05 08:19:51] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42 +[2025-07-05 08:19:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:19:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
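# Illustrative sketch of the get_lr schedule above (assuming the logged hyperparameters
# num_iterations=10000 and cooldown_frac=0.8): the multiplier holds at 1.0 for the first
# 20% of training, then decays linearly toward 0.1 over the remaining 80%.
#   get_lr(0)     -> 1.0        # x = 0.0 < 0.2, constant phase
#   get_lr(2000)  -> 1.0        # x = 0.2, w = (1 - 0.2) / 0.8 = 1.0
#   get_lr(6000)  -> ~0.55      # x = 0.6, w = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1
#   get_lr(10000) -> 0.1        # x = 1.0, w = 0.0
# Each optimizer group's effective LR is group["initial_lr"] * get_lr(step), as set in the
# training loop below.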
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:19:52] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:19:52] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:19:52] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:19:52] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:19:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:19:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:19:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:19:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:19:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:19:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:19:55] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:19:55] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:19:55] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:19:55] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:19:55] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:19:55] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:19:55] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:19:55] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:19:55] [Rank 0] PRINT: Model returns: +[2025-07-05 08:19:55] [Rank 0] PRINT: Model returns: +[2025-07-05 08:19:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:19:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:19:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:19:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:19:55] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:19:55] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:19:55] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:19:55] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:19:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:19:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:19:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:19:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:19:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:19:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:19:55] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:19:55] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:20:59] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:20:59] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:20:59] [Rank 0] PRINT: Starting training... +[2025-07-05 08:20:59] [Rank 0] PRINT: Starting training... +[2025-07-05 08:21:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:21:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
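A worked check of the warning above: val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144 tokens (world_size = 4 is inferred here from the logged ratio, not printed directly), so val_num_steps = 1966080 // 262144 = 7 and each validation pass covers 7 * 262144 = 1835008 tokens, leaving 131072 of the configured val_tokens unused.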
+[2025-07-05 08:21:07] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:21:07] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:21:09] [Rank 0] step:21/10000 train_time:714ms step_avg:34.02ms +[2025-07-05 08:21:09] [Rank 0] step:21/10000 train_time:714ms step_avg:34.02ms +[2025-07-05 08:21:10] [Rank 0] step:41/10000 train_time:2028ms step_avg:49.47ms +[2025-07-05 08:21:10] [Rank 0] step:41/10000 train_time:2028ms step_avg:49.47ms +[2025-07-05 08:21:11] [Rank 0] step:61/10000 train_time:3348ms step_avg:54.89ms +[2025-07-05 08:21:11] [Rank 0] step:61/10000 train_time:3348ms step_avg:54.89ms +[2025-07-05 08:21:13] [Rank 0] step:81/10000 train_time:4672ms step_avg:57.68ms +[2025-07-05 08:21:13] [Rank 0] step:81/10000 train_time:4672ms step_avg:57.68ms +[2025-07-05 08:21:14] [Rank 0] step:101/10000 train_time:5997ms step_avg:59.38ms +[2025-07-05 08:21:14] [Rank 0] step:101/10000 train_time:5997ms step_avg:59.38ms +[2025-07-05 08:21:15] [Rank 0] step:121/10000 train_time:7325ms step_avg:60.54ms +[2025-07-05 08:21:15] [Rank 0] step:121/10000 train_time:7325ms step_avg:60.54ms +[2025-07-05 08:21:17] [Rank 0] step:141/10000 train_time:8653ms step_avg:61.37ms +[2025-07-05 08:21:17] [Rank 0] step:141/10000 train_time:8653ms step_avg:61.37ms +[2025-07-05 08:21:18] [Rank 0] step:161/10000 train_time:9982ms step_avg:62.00ms +[2025-07-05 08:21:18] [Rank 0] step:161/10000 train_time:9982ms step_avg:62.00ms +[2025-07-05 08:21:20] [Rank 0] step:181/10000 train_time:11314ms step_avg:62.51ms +[2025-07-05 08:21:20] [Rank 0] step:181/10000 train_time:11314ms step_avg:62.51ms +[2025-07-05 08:21:21] [Rank 0] step:201/10000 train_time:12710ms step_avg:63.23ms +[2025-07-05 08:21:21] [Rank 0] step:201/10000 train_time:12710ms step_avg:63.23ms +[2025-07-05 08:21:22] [Rank 0] step:221/10000 train_time:14123ms step_avg:63.90ms +[2025-07-05 08:21:22] [Rank 0] step:221/10000 train_time:14123ms step_avg:63.90ms +[2025-07-05 08:21:24] [Rank 0] step:241/10000 train_time:15378ms step_avg:63.81ms +[2025-07-05 08:21:24] [Rank 0] step:241/10000 train_time:15378ms step_avg:63.81ms +[2025-07-05 08:21:25] [Rank 0] step:261/10000 train_time:16714ms step_avg:64.04ms +[2025-07-05 08:21:25] [Rank 0] step:261/10000 train_time:16714ms step_avg:64.04ms +[2025-07-05 08:21:26] [Rank 0] step:281/10000 train_time:18048ms step_avg:64.23ms +[2025-07-05 08:21:26] [Rank 0] step:281/10000 train_time:18048ms step_avg:64.23ms +[2025-07-05 08:21:28] [Rank 0] step:301/10000 train_time:19382ms step_avg:64.39ms +[2025-07-05 08:21:28] [Rank 0] step:301/10000 train_time:19382ms step_avg:64.39ms +[2025-07-05 08:21:29] [Rank 0] step:321/10000 train_time:20718ms step_avg:64.54ms +[2025-07-05 08:21:29] [Rank 0] step:321/10000 train_time:20718ms step_avg:64.54ms +[2025-07-05 08:21:30] [Rank 0] step:341/10000 train_time:22054ms step_avg:64.67ms +[2025-07-05 08:21:30] [Rank 0] step:341/10000 train_time:22054ms step_avg:64.67ms +[2025-07-05 08:21:32] [Rank 0] step:361/10000 train_time:23390ms step_avg:64.79ms +[2025-07-05 08:21:32] [Rank 0] step:361/10000 train_time:23390ms step_avg:64.79ms +[2025-07-05 08:21:33] [Rank 0] step:381/10000 train_time:24776ms step_avg:65.03ms +[2025-07-05 08:21:33] [Rank 0] step:381/10000 train_time:24776ms step_avg:65.03ms +[2025-07-05 08:21:34] [Rank 0] step:401/10000 train_time:26114ms step_avg:65.12ms +[2025-07-05 08:21:34] [Rank 0] step:401/10000 train_time:26114ms step_avg:65.12ms +[2025-07-05 08:21:36] [Rank 0] step:421/10000 train_time:27449ms step_avg:65.20ms 
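As a consistency check on the timing columns, step_avg is simply the cumulative train_time divided by the printed step count: 27449 ms / 421 ≈ 65.2 ms for the step:421 line above, and 33393 ms / 500 ≈ 66.8 ms for the step:500 validation line that follows.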
+[2025-07-05 08:21:36] [Rank 0] step:421/10000 train_time:27449ms step_avg:65.20ms +[2025-07-05 08:21:37] [Rank 0] step:441/10000 train_time:28784ms step_avg:65.27ms +[2025-07-05 08:21:37] [Rank 0] step:441/10000 train_time:28784ms step_avg:65.27ms +[2025-07-05 08:21:38] [Rank 0] step:461/10000 train_time:30118ms step_avg:65.33ms +[2025-07-05 08:21:38] [Rank 0] step:461/10000 train_time:30118ms step_avg:65.33ms +[2025-07-05 08:21:40] [Rank 0] step:481/10000 train_time:31452ms step_avg:65.39ms +[2025-07-05 08:21:40] [Rank 0] step:481/10000 train_time:31452ms step_avg:65.39ms +[2025-07-05 08:21:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:21:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:21:42] [Rank 0] PRINT: step:500/10000 train_loss:3.4526 val_loss:1.8982 train_time:33393ms step_avg:66.79ms +[2025-07-05 08:21:42] [Rank 0] PRINT: step:500/10000 train_loss:3.4526 val_loss:1.8982 train_time:33393ms step_avg:66.79ms +[2025-07-05 08:21:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:21:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..b3d5f3873ee9018e59bf1ae5a8ba01177eabd515 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "f46422ae-291d-4e52-b57b-bf3e6911e8f6", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/training_log_f46422ae-291d-4e52-b57b-bf3e6911e8f6.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/training_log_f46422ae-291d-4e52-b57b-bf3e6911e8f6.txt new file mode 100644 index 0000000000000000000000000000000000000000..4bfeb6ee32ea2264755ed05185bbdc4d78fbc4eb --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/training_log_f46422ae-291d-4e52-b57b-bf3e6911e8f6.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:47:06] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:47:06 2025 --- +[2025-07-05 08:47:06] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:47:06 2025 --- +[2025-07-05 08:47:06] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:47:06] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:47:06] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:47:06] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:47:06] [Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:47:06] 
[Rank 0] PRINT: Using fixed seed: 43 +[2025-07-05 08:47:06] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43 +[2025-07-05 08:47:06] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43 +[2025-07-05 08:47:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
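+# For reference: with the Hyperparameters above (num_iterations=10000, cooldown_frac=0.8),
+# get_lr(step) returns 1.0 for roughly the first 2000 steps and then decays linearly to 0.1
+# by the final step, e.g. get_lr(6000) == 0.55. train_loss_sum and train_step_count accumulate
+# the scaled training loss and step count between validation checkpoints so an average
+# train_loss can be reported at each validation step.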
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:47:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
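+# Each run gets its own directory named after its configuration,
+# i.e. mode_<optimizer_mode>_param_<model_parameterization>_lr_<adam_lr>_seed_<seed>,
+# where the master process writes config.json and a training_log_<uuid>.txt file.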
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
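# Illustrative check of the stable-then-decay schedule defined in get_lr above
# (assuming num_iterations=10000 and cooldown_frac=0.8, as in this run's config):
# the multiplier stays at 1.0 for the first 20% of training (x < 1 - cooldown_frac),
# then decays linearly toward 0.1, e.g.
#   get_lr(1000)  -> 1.0   (x = 0.1, stable phase)
#   get_lr(6000)  -> 0.55  (x = 0.6, w = 0.5 -> 0.5*1.0 + 0.5*0.1)
#   get_lr(10000) -> 0.1   (x = 1.0, end of cooldown)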
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:47:07] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:47:07] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:47:07] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:47:07] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:47:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:47:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:47:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:47:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:47:09] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:47:09] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:47:09] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:47:09] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:47:09] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:47:09] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:47:09] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:47:09] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:47:09] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:47:09] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:47:09] [Rank 0] PRINT: Model returns: +[2025-07-05 08:47:09] [Rank 0] PRINT: Model returns: +[2025-07-05 08:47:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:47:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:47:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:47:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:47:09] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:47:09] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:47:09] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:47:09] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:47:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:47:09] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:47:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:47:09] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:47:09] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:47:09] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:47:09] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:47:09] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:48:13] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:48:13] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:48:13] [Rank 0] PRINT: Starting training... +[2025-07-05 08:48:13] [Rank 0] PRINT: Starting training... +[2025-07-05 08:48:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:48:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 08:48:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:48:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:48:21] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.38ms +[2025-07-05 08:48:21] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.38ms +[2025-07-05 08:48:23] [Rank 0] step:41/10000 train_time:2331ms step_avg:56.85ms +[2025-07-05 08:48:23] [Rank 0] step:41/10000 train_time:2331ms step_avg:56.85ms +[2025-07-05 08:48:24] [Rank 0] step:61/10000 train_time:3650ms step_avg:59.83ms +[2025-07-05 08:48:24] [Rank 0] step:61/10000 train_time:3650ms step_avg:59.83ms +[2025-07-05 08:48:25] [Rank 0] step:81/10000 train_time:4974ms step_avg:61.41ms +[2025-07-05 08:48:25] [Rank 0] step:81/10000 train_time:4974ms step_avg:61.41ms +[2025-07-05 08:48:27] [Rank 0] step:101/10000 train_time:6299ms step_avg:62.37ms +[2025-07-05 08:48:27] [Rank 0] step:101/10000 train_time:6299ms step_avg:62.37ms +[2025-07-05 08:48:28] [Rank 0] step:121/10000 train_time:7627ms step_avg:63.03ms +[2025-07-05 08:48:28] [Rank 0] step:121/10000 train_time:7627ms step_avg:63.03ms +[2025-07-05 08:48:29] [Rank 0] step:141/10000 train_time:8955ms step_avg:63.51ms +[2025-07-05 08:48:29] [Rank 0] step:141/10000 train_time:8955ms step_avg:63.51ms +[2025-07-05 08:48:31] [Rank 0] step:161/10000 train_time:10285ms step_avg:63.88ms +[2025-07-05 08:48:31] [Rank 0] step:161/10000 train_time:10285ms step_avg:63.88ms +[2025-07-05 08:48:32] [Rank 0] step:181/10000 train_time:11663ms step_avg:64.44ms +[2025-07-05 08:48:32] [Rank 0] step:181/10000 train_time:11663ms step_avg:64.44ms +[2025-07-05 08:48:33] [Rank 0] step:201/10000 train_time:12950ms step_avg:64.43ms +[2025-07-05 08:48:33] [Rank 0] step:201/10000 train_time:12950ms step_avg:64.43ms +[2025-07-05 08:48:35] [Rank 0] step:221/10000 train_time:14284ms step_avg:64.63ms +[2025-07-05 08:48:35] [Rank 0] step:221/10000 train_time:14284ms step_avg:64.63ms +[2025-07-05 08:48:36] [Rank 0] step:241/10000 train_time:15619ms step_avg:64.81ms +[2025-07-05 08:48:36] [Rank 0] step:241/10000 train_time:15619ms step_avg:64.81ms +[2025-07-05 08:48:37] [Rank 0] step:261/10000 train_time:16954ms step_avg:64.96ms +[2025-07-05 08:48:37] [Rank 0] step:261/10000 train_time:16954ms step_avg:64.96ms +[2025-07-05 08:48:39] [Rank 0] step:281/10000 train_time:18290ms step_avg:65.09ms +[2025-07-05 08:48:39] [Rank 0] step:281/10000 train_time:18290ms step_avg:65.09ms +[2025-07-05 08:48:40] [Rank 0] step:301/10000 train_time:19625ms step_avg:65.20ms +[2025-07-05 08:48:40] [Rank 0] step:301/10000 train_time:19625ms step_avg:65.20ms +[2025-07-05 08:48:41] [Rank 0] step:321/10000 train_time:20961ms step_avg:65.30ms +[2025-07-05 08:48:41] [Rank 0] step:321/10000 train_time:20961ms step_avg:65.30ms +[2025-07-05 08:48:43] [Rank 0] step:341/10000 train_time:22296ms step_avg:65.38ms +[2025-07-05 08:48:43] [Rank 0] step:341/10000 train_time:22296ms step_avg:65.38ms +[2025-07-05 08:48:44] [Rank 0] step:361/10000 train_time:23675ms step_avg:65.58ms +[2025-07-05 08:48:44] [Rank 0] step:361/10000 train_time:23675ms step_avg:65.58ms +[2025-07-05 08:48:46] [Rank 0] step:381/10000 train_time:25025ms step_avg:65.68ms +[2025-07-05 08:48:46] [Rank 0] step:381/10000 train_time:25025ms step_avg:65.68ms +[2025-07-05 08:48:47] [Rank 0] step:401/10000 train_time:26389ms step_avg:65.81ms +[2025-07-05 08:48:47] [Rank 0] step:401/10000 train_time:26389ms step_avg:65.81ms +[2025-07-05 08:48:48] [Rank 0] step:421/10000 train_time:27725ms step_avg:65.85ms 
+[2025-07-05 08:48:48] [Rank 0] step:421/10000 train_time:27725ms step_avg:65.85ms +[2025-07-05 08:48:50] [Rank 0] step:441/10000 train_time:29062ms step_avg:65.90ms +[2025-07-05 08:48:50] [Rank 0] step:441/10000 train_time:29062ms step_avg:65.90ms +[2025-07-05 08:48:51] [Rank 0] step:461/10000 train_time:30399ms step_avg:65.94ms +[2025-07-05 08:48:51] [Rank 0] step:461/10000 train_time:30399ms step_avg:65.94ms +[2025-07-05 08:48:52] [Rank 0] step:481/10000 train_time:31736ms step_avg:65.98ms +[2025-07-05 08:48:52] [Rank 0] step:481/10000 train_time:31736ms step_avg:65.98ms +[2025-07-05 08:48:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:48:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:48:54] [Rank 0] PRINT: step:500/10000 train_loss:3.6417 val_loss:2.1251 train_time:33680ms step_avg:67.36ms +[2025-07-05 08:48:54] [Rank 0] PRINT: step:500/10000 train_loss:3.6417 val_loss:2.1251 train_time:33680ms step_avg:67.36ms +[2025-07-05 08:48:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:48:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..04582140e9a9f6e86c3555501b22ed91d6f9e271 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "902455e1-212b-49e7-9811-bafebfcc86de", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44/training_log_902455e1-212b-49e7-9811-bafebfcc86de.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44/training_log_902455e1-212b-49e7-9811-bafebfcc86de.txt new file mode 100644 index 0000000000000000000000000000000000000000..6a763531dafa3c341d7fa0f8b3e13a90a0338866 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44/training_log_902455e1-212b-49e7-9811-bafebfcc86de.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:14:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:14:39 2025 --- +[2025-07-05 09:14:39] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:14:39 2025 --- +[2025-07-05 09:14:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:14:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:14:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:14:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:14:39] [Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:14:39] 
[Rank 0] PRINT: Using fixed seed: 44 +[2025-07-05 09:14:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44 +[2025-07-05 09:14:39] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_44 +[2025-07-05 09:14:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
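+# (Editorial note, not part of the original script.) train_loss_sum above and train_step_count below
+# accumulate, respectively, the detached per-step loss scaled by 1/args.train_seq_len and the number of
+# training steps taken since the last validation pass; both are reset to zero at the end of each
+# validation block, so the avg_train_loss logged there is the mean over the preceding interval
+# (all-reduced across ranks).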
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:14:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
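+# (Editorial example, not part of the original script.) With the argparse defaults above
+# (optimizer_mode=0, model_parameterization="whole", adam_lr=1e-3, seed=42), the run_dir_path_str
+# f-string above resolves to a run directory named:
+#   /home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_0_param_whole_lr_0.001_seed_42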
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
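+# A quick illustration of the get_lr and get_window_size_blocks schedules defined
+# above, using this run's settings (num_iterations=10000, cooldown_frac=0.8); the
+# numbers below are worked examples only, not extra configuration:
+#   get_lr: the multiplier stays at 1.0 while step/num_iterations < 0.2, then decays
+#   linearly to 0.1, e.g. get_lr(6000) = 0.55 and get_lr(10000) = 0.1.
+#   get_window_size_blocks: the attention window grows from 128 tokens (1 block)
+#   at step 0 to 1792 tokens (14 blocks) at the final step.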
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:14:40] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:14:40] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:14:40] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:14:40] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:14:42] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:14:42] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:14:42] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:14:42] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:14:42] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:14:42] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:14:43] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:14:43] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:14:43] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:14:43] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:14:43] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:14:43] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:14:43] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:14:43] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:14:43] [Rank 0] PRINT: Model returns: +[2025-07-05 09:14:43] [Rank 0] PRINT: Model returns: +[2025-07-05 09:14:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:14:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:14:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:14:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:14:43] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 09:14:43] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 09:14:43] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:14:43] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:14:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:14:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:14:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:14:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:14:43] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:14:43] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:14:43] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:14:43] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:15:47] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:15:47] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:15:47] [Rank 0] PRINT: Starting training... +[2025-07-05 09:15:47] [Rank 0] PRINT: Starting training... +[2025-07-05 09:15:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:15:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
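+The divisibility warning above follows from the logged settings; a minimal check, assuming the world size of 4 implied by val_batch_size / val_seq_len = 262144 / 65536:
+    val_batch_size = 4 * 65536             # world_size * val_seq_len = 262144
+    val_num_steps  = 1966080 // 262144     # = 7 full validation steps
+    leftover       = 1966080 - 7 * 262144  # = 131072 tokens skipped per validation pass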
+[2025-07-05 09:15:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:15:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:15:56] [Rank 0] step:21/10000 train_time:810ms step_avg:38.58ms +[2025-07-05 09:15:56] [Rank 0] step:21/10000 train_time:810ms step_avg:38.58ms +[2025-07-05 09:15:58] [Rank 0] step:41/10000 train_time:2122ms step_avg:51.75ms +[2025-07-05 09:15:58] [Rank 0] step:41/10000 train_time:2122ms step_avg:51.75ms +[2025-07-05 09:15:59] [Rank 0] step:61/10000 train_time:3437ms step_avg:56.35ms +[2025-07-05 09:15:59] [Rank 0] step:61/10000 train_time:3437ms step_avg:56.35ms +[2025-07-05 09:16:00] [Rank 0] step:81/10000 train_time:4760ms step_avg:58.77ms +[2025-07-05 09:16:00] [Rank 0] step:81/10000 train_time:4760ms step_avg:58.77ms +[2025-07-05 09:16:02] [Rank 0] step:101/10000 train_time:6085ms step_avg:60.25ms +[2025-07-05 09:16:02] [Rank 0] step:101/10000 train_time:6085ms step_avg:60.25ms +[2025-07-05 09:16:03] [Rank 0] step:121/10000 train_time:7412ms step_avg:61.26ms +[2025-07-05 09:16:03] [Rank 0] step:121/10000 train_time:7412ms step_avg:61.26ms +[2025-07-05 09:16:04] [Rank 0] step:141/10000 train_time:8742ms step_avg:62.00ms +[2025-07-05 09:16:04] [Rank 0] step:141/10000 train_time:8742ms step_avg:62.00ms +[2025-07-05 09:16:06] [Rank 0] step:161/10000 train_time:10075ms step_avg:62.58ms +[2025-07-05 09:16:06] [Rank 0] step:161/10000 train_time:10075ms step_avg:62.58ms +[2025-07-05 09:16:07] [Rank 0] step:181/10000 train_time:11453ms step_avg:63.27ms +[2025-07-05 09:16:07] [Rank 0] step:181/10000 train_time:11453ms step_avg:63.27ms +[2025-07-05 09:16:08] [Rank 0] step:201/10000 train_time:12806ms step_avg:63.71ms +[2025-07-05 09:16:08] [Rank 0] step:201/10000 train_time:12806ms step_avg:63.71ms +[2025-07-05 09:16:10] [Rank 0] step:221/10000 train_time:14139ms step_avg:63.98ms +[2025-07-05 09:16:10] [Rank 0] step:221/10000 train_time:14139ms step_avg:63.98ms +[2025-07-05 09:16:11] [Rank 0] step:241/10000 train_time:15497ms step_avg:64.30ms +[2025-07-05 09:16:11] [Rank 0] step:241/10000 train_time:15497ms step_avg:64.30ms +[2025-07-05 09:16:12] [Rank 0] step:261/10000 train_time:16808ms step_avg:64.40ms +[2025-07-05 09:16:12] [Rank 0] step:261/10000 train_time:16808ms step_avg:64.40ms +[2025-07-05 09:16:14] [Rank 0] step:281/10000 train_time:18150ms step_avg:64.59ms +[2025-07-05 09:16:14] [Rank 0] step:281/10000 train_time:18150ms step_avg:64.59ms +[2025-07-05 09:16:15] [Rank 0] step:301/10000 train_time:19485ms step_avg:64.73ms +[2025-07-05 09:16:15] [Rank 0] step:301/10000 train_time:19485ms step_avg:64.73ms +[2025-07-05 09:16:16] [Rank 0] step:321/10000 train_time:20818ms step_avg:64.85ms +[2025-07-05 09:16:16] [Rank 0] step:321/10000 train_time:20818ms step_avg:64.85ms +[2025-07-05 09:16:18] [Rank 0] step:341/10000 train_time:22152ms step_avg:64.96ms +[2025-07-05 09:16:18] [Rank 0] step:341/10000 train_time:22152ms step_avg:64.96ms +[2025-07-05 09:16:19] [Rank 0] step:361/10000 train_time:23740ms step_avg:65.76ms +[2025-07-05 09:16:19] [Rank 0] step:361/10000 train_time:23740ms step_avg:65.76ms +[2025-07-05 09:16:20] [Rank 0] step:381/10000 train_time:24873ms step_avg:65.28ms +[2025-07-05 09:16:20] [Rank 0] step:381/10000 train_time:24873ms step_avg:65.28ms +[2025-07-05 09:16:22] [Rank 0] step:401/10000 train_time:26209ms step_avg:65.36ms +[2025-07-05 09:16:22] [Rank 0] step:401/10000 train_time:26209ms step_avg:65.36ms +[2025-07-05 09:16:23] [Rank 0] step:421/10000 train_time:27544ms step_avg:65.43ms 
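+For reference, the step_avg figures above translate to throughput under the same 4-GPU reading (train_seq_len = 12288 tokens per rank); a rough sketch:
+    tokens_per_step = 4 * 12288         # world_size * train_seq_len = 49152
+    throughput      = 49152 / 0.0655    # about 7.5e5 tokens/s at the ~65.5 ms step_avg above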
+[2025-07-05 09:16:23] [Rank 0] step:421/10000 train_time:27544ms step_avg:65.43ms +[2025-07-05 09:16:24] [Rank 0] step:441/10000 train_time:28880ms step_avg:65.49ms +[2025-07-05 09:16:24] [Rank 0] step:441/10000 train_time:28880ms step_avg:65.49ms +[2025-07-05 09:16:26] [Rank 0] step:461/10000 train_time:30215ms step_avg:65.54ms +[2025-07-05 09:16:26] [Rank 0] step:461/10000 train_time:30215ms step_avg:65.54ms +[2025-07-05 09:16:27] [Rank 0] step:481/10000 train_time:31550ms step_avg:65.59ms +[2025-07-05 09:16:27] [Rank 0] step:481/10000 train_time:31550ms step_avg:65.59ms +[2025-07-05 09:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:16:29] [Rank 0] PRINT: step:500/10000 train_loss:3.6536 val_loss:1.9697 train_time:33492ms step_avg:66.98ms +[2025-07-05 09:16:29] [Rank 0] PRINT: step:500/10000 train_loss:3.6536 val_loss:1.9697 train_time:33492ms step_avg:66.98ms +[2025-07-05 09:16:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:16:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..4ce8ef2207843e4e3b543bbf57108439eb143f0b --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "35fa3850-2910-4fe0-ad45-4e62472611fd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/training_log_35fa3850-2910-4fe0-ad45-4e62472611fd.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/training_log_35fa3850-2910-4fe0-ad45-4e62472611fd.txt new file mode 100644 index 0000000000000000000000000000000000000000..764b2b30f2011fc02a545151118b91caad661741 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/training_log_35fa3850-2910-4fe0-ad45-4e62472611fd.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:41:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:41:43 2025 --- +[2025-07-05 09:41:43] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:41:43 2025 --- +[2025-07-05 09:41:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:41:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:41:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:41:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:41:43] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:41:43] 
[Rank 0] PRINT: Using fixed seed: 45 +[2025-07-05 09:41:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45 +[2025-07-05 09:41:43] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45 +[2025-07-05 09:41:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
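+# --- Editor's note: illustrative sketch only, not part of the original run script ---
+# get_lr() above implements a "stable then decay" multiplier: it stays at 1.0 for the
+# first (1 - cooldown_frac) of training, then decays linearly towards 0.1. The helper
+# below reproduces that shape standalone so the schedule can be inspected in isolation;
+# the defaults (iters=10000, cooldown=0.8) are assumed to match this run's hyperparameters.
+def _sketch_lr_multiplier(step: int, iters: int = 10000, cooldown: float = 0.8) -> float:
+    x = min(max(step / iters, 0.0), 1.0)   # clamped training progress, as in get_lr()
+    if x < 1 - cooldown:
+        return 1.0                          # stable phase
+    w = (1 - x) / max(cooldown, 1e-9)       # goes 1 -> 0 across the cooldown window
+    return w * 1.0 + (1 - w) * 0.1          # linear decay from 1.0 down to 0.1
+# Example values under these assumptions: step 0 -> 1.0, step 2000 -> 1.0,
+# step 6000 -> 0.55, step 10000 -> 0.1.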
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:41:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
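+# --- Editor's note: illustrative sketch only, not part of the original run script ---
+# run_dir_path_str above encodes the experiment configuration directly in the folder
+# name, so each (optimizer_mode, model_parameterization, adam_lr, seed) combination
+# logs to its own directory. The helper below shows just the naming rule; the default
+# argument values are assumed examples corresponding to this particular run.
+def _sketch_run_dir_name(optimizer_mode: int = 0, parameterization: str = "qkvo",
+                         adam_lr: float = 1e-4, seed: int = 42) -> str:
+    return f"mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed}"
+# Example: _sketch_run_dir_name() -> "mode_0_param_qkvo_lr_0.0001_seed_42", the folder
+# under logs_bios/qa_0704 that holds this run's config.json and training log.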
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
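+# --- Editor's note: illustrative sketch only, not part of the original run script ---
+# get_window_size_blocks() above grows the sliding attention window with training
+# progress: 1728 * progress tokens, rounded up to a multiple of 128, floored at 128,
+# and returned as a count of 128-token blocks. The helper below mirrors that rule in
+# plain Python; iters=10000 is an assumed example matching this run's num_iterations.
+import math
+def _sketch_window_blocks(step: int, iters: int = 10000) -> int:
+    x = min(max(step / iters, 0.0), 1.0)                 # clamped training progress
+    window = max(128, math.ceil(1728 * x / 128) * 128)   # round up to a multiple of 128
+    return window // 128                                 # window size in 128-token blocks
+# Example values under these assumptions: step 0 -> 1 block (128 tokens),
+# step 5000 -> 7 blocks (896 tokens), step 10000 -> 14 blocks (1792 tokens).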
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:41:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:41:43] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:41:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:41:43] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:41:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:41:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:41:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:41:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:41:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:41:45] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:41:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:41:46] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:41:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:41:46] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:41:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:41:46] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:41:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:41:46] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:41:46] [Rank 0] PRINT: Model returns: +[2025-07-05 09:41:46] [Rank 0] PRINT: Model returns: +[2025-07-05 09:41:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:41:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:41:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:41:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:41:46] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 09:41:46] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 09:41:46] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:41:46] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:41:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:41:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:41:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:41:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:41:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:41:46] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:41:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:41:46] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:42:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:42:49] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:42:49] [Rank 0] PRINT: Starting training... +[2025-07-05 09:42:49] [Rank 0] PRINT: Starting training... +[2025-07-05 09:42:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:42:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
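# --- Editor's note (not part of the log): unpacking the divisibility warning above. ---
# Each message appears twice in this file because print0 in the logged script writes to the
# logfile in two places. With val_seq_len = 65536 and the reported val_batch_size of 262144
# tokens, the batch size implies world_size = 262144 // 65536 = 4 ranks; each evaluation then
# runs 1966080 // 262144 = 7 full validation batches and skips the remaining
# 1966080 - 7 * 262144 = 131072 tokens, which is what the warning is pointing at.
val_tokens, val_batch_size = 1966080, 262144
assert val_batch_size // 65536 == 4                 # inferred world_size
assert val_tokens // val_batch_size == 7            # validation steps per evaluation
assert val_tokens - 7 * val_batch_size == 131072    # tokens skipped each evaluation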
+[2025-07-05 09:42:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:42:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:42:59] [Rank 0] step:21/10000 train_time:1444ms step_avg:68.74ms +[2025-07-05 09:42:59] [Rank 0] step:21/10000 train_time:1444ms step_avg:68.74ms +[2025-07-05 09:43:00] [Rank 0] step:41/10000 train_time:2757ms step_avg:67.23ms +[2025-07-05 09:43:00] [Rank 0] step:41/10000 train_time:2757ms step_avg:67.23ms +[2025-07-05 09:43:01] [Rank 0] step:61/10000 train_time:4073ms step_avg:66.77ms +[2025-07-05 09:43:01] [Rank 0] step:61/10000 train_time:4073ms step_avg:66.77ms +[2025-07-05 09:43:02] [Rank 0] step:81/10000 train_time:5397ms step_avg:66.63ms +[2025-07-05 09:43:02] [Rank 0] step:81/10000 train_time:5397ms step_avg:66.63ms +[2025-07-05 09:43:04] [Rank 0] step:101/10000 train_time:6725ms step_avg:66.59ms +[2025-07-05 09:43:04] [Rank 0] step:101/10000 train_time:6725ms step_avg:66.59ms +[2025-07-05 09:43:05] [Rank 0] step:121/10000 train_time:8052ms step_avg:66.55ms +[2025-07-05 09:43:05] [Rank 0] step:121/10000 train_time:8052ms step_avg:66.55ms +[2025-07-05 09:43:06] [Rank 0] step:141/10000 train_time:9382ms step_avg:66.54ms +[2025-07-05 09:43:06] [Rank 0] step:141/10000 train_time:9382ms step_avg:66.54ms +[2025-07-05 09:43:08] [Rank 0] step:161/10000 train_time:10710ms step_avg:66.52ms +[2025-07-05 09:43:08] [Rank 0] step:161/10000 train_time:10710ms step_avg:66.52ms +[2025-07-05 09:43:09] [Rank 0] step:181/10000 train_time:12291ms step_avg:67.91ms +[2025-07-05 09:43:09] [Rank 0] step:181/10000 train_time:12291ms step_avg:67.91ms +[2025-07-05 09:43:11] [Rank 0] step:201/10000 train_time:13426ms step_avg:66.79ms +[2025-07-05 09:43:11] [Rank 0] step:201/10000 train_time:13426ms step_avg:66.79ms +[2025-07-05 09:43:12] [Rank 0] step:221/10000 train_time:14759ms step_avg:66.78ms +[2025-07-05 09:43:12] [Rank 0] step:221/10000 train_time:14759ms step_avg:66.78ms +[2025-07-05 09:43:13] [Rank 0] step:241/10000 train_time:16094ms step_avg:66.78ms +[2025-07-05 09:43:13] [Rank 0] step:241/10000 train_time:16094ms step_avg:66.78ms +[2025-07-05 09:43:15] [Rank 0] step:261/10000 train_time:17429ms step_avg:66.78ms +[2025-07-05 09:43:15] [Rank 0] step:261/10000 train_time:17429ms step_avg:66.78ms +[2025-07-05 09:43:16] [Rank 0] step:281/10000 train_time:18765ms step_avg:66.78ms +[2025-07-05 09:43:16] [Rank 0] step:281/10000 train_time:18765ms step_avg:66.78ms +[2025-07-05 09:43:17] [Rank 0] step:301/10000 train_time:20099ms step_avg:66.78ms +[2025-07-05 09:43:17] [Rank 0] step:301/10000 train_time:20099ms step_avg:66.78ms +[2025-07-05 09:43:19] [Rank 0] step:321/10000 train_time:21538ms step_avg:67.10ms +[2025-07-05 09:43:19] [Rank 0] step:321/10000 train_time:21538ms step_avg:67.10ms +[2025-07-05 09:43:20] [Rank 0] step:341/10000 train_time:22875ms step_avg:67.08ms +[2025-07-05 09:43:20] [Rank 0] step:341/10000 train_time:22875ms step_avg:67.08ms +[2025-07-05 09:43:21] [Rank 0] step:361/10000 train_time:24462ms step_avg:67.76ms +[2025-07-05 09:43:21] [Rank 0] step:361/10000 train_time:24462ms step_avg:67.76ms +[2025-07-05 09:43:23] [Rank 0] step:381/10000 train_time:25614ms step_avg:67.23ms +[2025-07-05 09:43:23] [Rank 0] step:381/10000 train_time:25614ms step_avg:67.23ms +[2025-07-05 09:43:24] [Rank 0] step:401/10000 train_time:26951ms step_avg:67.21ms +[2025-07-05 09:43:24] [Rank 0] step:401/10000 train_time:26951ms step_avg:67.21ms +[2025-07-05 09:43:25] [Rank 0] step:421/10000 train_time:28288ms step_avg:67.19ms 
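# --- Editor's note (not part of the log): rough throughput implied by the step timings above. ---
# Assuming world_size = 4 (inferred from the 262144-token validation batches), each training
# step consumes train_seq_len * world_size = 12288 * 4 = 49152 tokens. At the ~67 ms/step
# average reported here that is roughly 49152 / 0.067 ≈ 7.3e5 tokens/s, and the full 10000
# steps project to about 670 s of pure training time, excluding the periodic evaluations.
tokens_per_step = 12288 * 4
assert tokens_per_step == 49152
approx_tokens_per_sec = tokens_per_step / 0.067   # ≈ 733,000 tokens/s at 67 ms per step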
+[2025-07-05 09:43:25] [Rank 0] step:421/10000 train_time:28288ms step_avg:67.19ms +[2025-07-05 09:43:27] [Rank 0] step:441/10000 train_time:29624ms step_avg:67.17ms +[2025-07-05 09:43:27] [Rank 0] step:441/10000 train_time:29624ms step_avg:67.17ms +[2025-07-05 09:43:28] [Rank 0] step:461/10000 train_time:30960ms step_avg:67.16ms +[2025-07-05 09:43:28] [Rank 0] step:461/10000 train_time:30960ms step_avg:67.16ms +[2025-07-05 09:43:29] [Rank 0] step:481/10000 train_time:32294ms step_avg:67.14ms +[2025-07-05 09:43:29] [Rank 0] step:481/10000 train_time:32294ms step_avg:67.14ms +[2025-07-05 09:43:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:43:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:43:32] [Rank 0] PRINT: step:500/10000 train_loss:3.8064 val_loss:2.0947 train_time:34238ms step_avg:68.48ms +[2025-07-05 09:43:32] [Rank 0] PRINT: step:500/10000 train_loss:3.8064 val_loss:2.0947 train_time:34238ms step_avg:68.48ms +[2025-07-05 09:43:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:43:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..36fc86d1b4fb1861e39a38e294b961bf1de98779 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "02f6e0d2-d2d7-4111-9e8e-972231f8f258", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46/training_log_02f6e0d2-d2d7-4111-9e8e-972231f8f258.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46/training_log_02f6e0d2-d2d7-4111-9e8e-972231f8f258.txt new file mode 100644 index 0000000000000000000000000000000000000000..d2841eb8e883352e91c11a81656230a635ae34f8 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46/training_log_02f6e0d2-d2d7-4111-9e8e-972231f8f258.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:09:02] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:09:02 2025 --- +[2025-07-05 10:09:02] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:09:02 2025 --- +[2025-07-05 10:09:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 10:09:02] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 10:09:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:09:02] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:09:02] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:09:02] 
[Rank 0] PRINT: Using fixed seed: 46 +[2025-07-05 10:09:02] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46 +[2025-07-05 10:09:02] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_46 +[2025-07-05 10:09:02] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
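For reference, the `get_lr` schedule logged above keeps the learning-rate multiplier at 1.0 for the first `1 - cooldown_frac` of training and then decays it linearly to 0.1 by the final step; every optimizer group's `initial_lr` is scaled by this factor each step, so Adam and Muon share the same decay shape. A minimal standalone sketch of that arithmetic, assuming the logged values `num_iterations = 10000` and `cooldown_frac = 0.8` (illustration only, not part of the original run):

```python
# Illustrative sketch of the stable-then-decay LR multiplier used above.
# Assumes num_iterations=10000 and cooldown_frac=0.8 as in the logged Hyperparameters.
def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)   # clamped training progress
    if x < 1 - cooldown_frac:
        return 1.0                                  # constant phase
    w = (1 - x) / cooldown_frac                     # linear weight going 1 -> 0 over the cooldown
    return w * 1.0 + (1 - w) * 0.1                  # interpolate from 1.0 down to 0.1

if __name__ == "__main__":
    for s in (0, 2000, 6000, 10000):
        print(s, round(lr_multiplier(s), 3))        # 1.0, 1.0, 0.55, 0.1
```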
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:09:02] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
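For readability, the `--optimizer_mode` values accepted above correspond to the following Muon/Adam split over matrix parameters. This is only a condensed summary of the `if/elif` chain in the optimizer setup further down; the group names mirror the variables defined there (`attn_qk_group`, `attn_vo_group`, `mlp_w1_group`, `mlp_w2_group`) and the table itself is not code from the original run. Embeddings, `lm_head`, and scalar parameters always go to Adam regardless of mode.

```python
# Hypothetical summary of optimizer_mode -> (Muon targets, extra Adam matrix targets),
# condensed from the mode dispatch implemented later in the script.
OPTIMIZER_MODES = {
    0: ("attn_qk + attn_vo + mlp_w1 + mlp_w2", ""),                        # Muon on all hidden matrices
    1: ("attn_qk",                   "attn_vo + mlp_w1 + mlp_w2"),
    2: ("attn_vo",                   "attn_qk + mlp_w1 + mlp_w2"),
    3: ("attn_qk + attn_vo",         "mlp_w1 + mlp_w2"),
    4: ("mlp_w1 + mlp_w2",           "attn_qk + attn_vo"),
    5: ("",                          "attn_qk + attn_vo + mlp_w1 + mlp_w2"),  # all Adam
    6: ("mlp_w2",                    "attn_qk + attn_vo + mlp_w1"),
    7: ("attn_vo + mlp_w1 + mlp_w2", "attn_qk"),
    8: ("attn_vo + mlp_w2",          "attn_qk + mlp_w1"),
}

if __name__ == "__main__":
    muon, adam = OPTIMIZER_MODES[0]
    print(f"mode 0 -> Muon: {muon or 'none'} | Adam matrices: {adam or 'none'}")
```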
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
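As a side note on the attention window schedule defined above: `get_window_size_blocks` grows the sliding window linearly from 128 tokens at step 0 to 1728 tokens at the final step, rounded up to multiples of the 128-token block size. A small self-contained sketch of that arithmetic, assuming `num_iterations = 10000` as logged (illustration only, not part of the original script):

```python
import math

# Sketch of the window-size schedule: linear ramp toward 1728 tokens,
# rounded up to 128-token blocks, with a 128-token floor.
def window_blocks(step: int, num_iterations: int = 10000) -> int:
    x = min(max(step / num_iterations, 0.0), 1.0)             # clamped training progress
    window_size = max(128, 128 * math.ceil(1728 * x / 128))   # next multiple of 128, at least 128
    return window_size // 128                                  # number of 128-token blocks

if __name__ == "__main__":
    for s in (0, 2500, 5000, 10000):
        print(s, window_blocks(s))   # 1, 4, 7, 14 blocks -> 128, 512, 896, 1792 tokens
```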
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:09:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:09:03] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 10:09:03] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:09:03] [Rank 0] PRINT: Constructing model... +[2025-07-05 10:09:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:09:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 10:09:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:09:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 10:09:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:09:05] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 10:09:05] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:09:05] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 10:09:05] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:09:05] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 10:09:05] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:09:05] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 10:09:05] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:09:05] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 10:09:05] [Rank 0] PRINT: Model returns: +[2025-07-05 10:09:05] [Rank 0] PRINT: Model returns: +[2025-07-05 10:09:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:09:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 10:09:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:09:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 10:09:05] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 10:09:05] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 10:09:05] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:09:05] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 10:09:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:09:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 10:09:06] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:09:06] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 10:09:06] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:09:06] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 10:09:06] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:09:06] [Rank 0] PRINT: Starting warmup... +[2025-07-05 10:10:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:10:09] [Rank 0] PRINT: Warmup complete. +[2025-07-05 10:10:09] [Rank 0] PRINT: Starting training... +[2025-07-05 10:10:09] [Rank 0] PRINT: Starting training... +[2025-07-05 10:10:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:10:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 10:10:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:10:16] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 10:10:18] [Rank 0] step:21/10000 train_time:1015ms step_avg:48.33ms +[2025-07-05 10:10:18] [Rank 0] step:21/10000 train_time:1015ms step_avg:48.33ms +[2025-07-05 10:10:19] [Rank 0] step:41/10000 train_time:2331ms step_avg:56.85ms +[2025-07-05 10:10:19] [Rank 0] step:41/10000 train_time:2331ms step_avg:56.85ms +[2025-07-05 10:10:21] [Rank 0] step:61/10000 train_time:3649ms step_avg:59.82ms +[2025-07-05 10:10:21] [Rank 0] step:61/10000 train_time:3649ms step_avg:59.82ms +[2025-07-05 10:10:22] [Rank 0] step:81/10000 train_time:4975ms step_avg:61.42ms +[2025-07-05 10:10:22] [Rank 0] step:81/10000 train_time:4975ms step_avg:61.42ms +[2025-07-05 10:10:23] [Rank 0] step:101/10000 train_time:6401ms step_avg:63.38ms +[2025-07-05 10:10:23] [Rank 0] step:101/10000 train_time:6401ms step_avg:63.38ms +[2025-07-05 10:10:25] [Rank 0] step:121/10000 train_time:7727ms step_avg:63.86ms +[2025-07-05 10:10:25] [Rank 0] step:121/10000 train_time:7727ms step_avg:63.86ms +[2025-07-05 10:10:26] [Rank 0] step:141/10000 train_time:9057ms step_avg:64.23ms +[2025-07-05 10:10:26] [Rank 0] step:141/10000 train_time:9057ms step_avg:64.23ms +[2025-07-05 10:10:27] [Rank 0] step:161/10000 train_time:10387ms step_avg:64.52ms +[2025-07-05 10:10:27] [Rank 0] step:161/10000 train_time:10387ms step_avg:64.52ms +[2025-07-05 10:10:29] [Rank 0] step:181/10000 train_time:12394ms step_avg:68.48ms +[2025-07-05 10:10:29] [Rank 0] step:181/10000 train_time:12394ms step_avg:68.48ms +[2025-07-05 10:10:30] [Rank 0] step:201/10000 train_time:13113ms step_avg:65.24ms +[2025-07-05 10:10:30] [Rank 0] step:201/10000 train_time:13113ms step_avg:65.24ms +[2025-07-05 10:10:31] [Rank 0] step:221/10000 train_time:14446ms step_avg:65.37ms +[2025-07-05 10:10:31] [Rank 0] step:221/10000 train_time:14446ms step_avg:65.37ms +[2025-07-05 10:10:33] [Rank 0] step:241/10000 train_time:15780ms step_avg:65.48ms +[2025-07-05 10:10:33] [Rank 0] step:241/10000 train_time:15780ms step_avg:65.48ms +[2025-07-05 10:10:34] [Rank 0] step:261/10000 train_time:17118ms step_avg:65.59ms +[2025-07-05 10:10:34] [Rank 0] step:261/10000 train_time:17118ms step_avg:65.59ms +[2025-07-05 10:10:35] [Rank 0] step:281/10000 train_time:18453ms step_avg:65.67ms +[2025-07-05 10:10:35] [Rank 0] step:281/10000 train_time:18453ms step_avg:65.67ms +[2025-07-05 10:10:37] [Rank 0] step:301/10000 train_time:19790ms step_avg:65.75ms +[2025-07-05 10:10:37] [Rank 0] step:301/10000 train_time:19790ms step_avg:65.75ms +[2025-07-05 10:10:38] [Rank 0] step:321/10000 train_time:21127ms step_avg:65.82ms +[2025-07-05 10:10:38] [Rank 0] step:321/10000 train_time:21127ms step_avg:65.82ms +[2025-07-05 10:10:40] [Rank 0] step:341/10000 train_time:22464ms step_avg:65.88ms +[2025-07-05 10:10:40] [Rank 0] step:341/10000 train_time:22464ms step_avg:65.88ms +[2025-07-05 10:10:41] [Rank 0] step:361/10000 train_time:24459ms step_avg:67.75ms +[2025-07-05 10:10:41] [Rank 0] step:361/10000 train_time:24459ms step_avg:67.75ms +[2025-07-05 10:10:42] [Rank 0] step:381/10000 train_time:25181ms step_avg:66.09ms +[2025-07-05 10:10:42] [Rank 0] step:381/10000 train_time:25181ms step_avg:66.09ms +[2025-07-05 10:10:44] [Rank 0] step:401/10000 train_time:26519ms step_avg:66.13ms +[2025-07-05 10:10:44] [Rank 0] step:401/10000 train_time:26519ms step_avg:66.13ms +[2025-07-05 10:10:45] [Rank 0] step:421/10000 train_time:27855ms step_avg:66.16ms 
+[2025-07-05 10:10:45] [Rank 0] step:421/10000 train_time:27855ms step_avg:66.16ms +[2025-07-05 10:10:46] [Rank 0] step:441/10000 train_time:29193ms step_avg:66.20ms +[2025-07-05 10:10:46] [Rank 0] step:441/10000 train_time:29193ms step_avg:66.20ms +[2025-07-05 10:10:48] [Rank 0] step:461/10000 train_time:30528ms step_avg:66.22ms +[2025-07-05 10:10:48] [Rank 0] step:461/10000 train_time:30528ms step_avg:66.22ms +[2025-07-05 10:10:49] [Rank 0] step:481/10000 train_time:31873ms step_avg:66.26ms +[2025-07-05 10:10:49] [Rank 0] step:481/10000 train_time:31873ms step_avg:66.26ms +[2025-07-05 10:10:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:10:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 10:10:51] [Rank 0] PRINT: step:500/10000 train_loss:3.8113 val_loss:2.4540 train_time:33818ms step_avg:67.64ms +[2025-07-05 10:10:51] [Rank 0] PRINT: step:500/10000 train_loss:3.8113 val_loss:2.4540 train_time:33818ms step_avg:67.64ms +[2025-07-05 10:10:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 10:10:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fe646de02b070adb0a4ca6f4f159c157335165b2 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 47, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "c8a7fb34-5b3b-4d68-a669-3bcd9a202fd5", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47/training_log_c8a7fb34-5b3b-4d68-a669-3bcd9a202fd5.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47/training_log_c8a7fb34-5b3b-4d68-a669-3bcd9a202fd5.txt new file mode 100644 index 0000000000000000000000000000000000000000..776641028809d2e5caa6a3e0b8d5af973a3571b3 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47/training_log_c8a7fb34-5b3b-4d68-a669-3bcd9a202fd5.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:28:22] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:28:22 2025 --- +[2025-07-05 08:28:22] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:28:22 2025 --- +[2025-07-05 08:28:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:28:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=47, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:28:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:28:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:28:22] [Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:28:22] 
[Rank 0] PRINT: Using fixed seed: 47 +[2025-07-05 08:28:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47 +[2025-07-05 08:28:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_47 +[2025-07-05 08:28:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
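+# train_loader above requests world_size * train_seq_len tokens per step and each rank
+# slices out its own train_seq_len-token shard (12288 tokens per rank with this run's
+# config). The warmup block before it exists only to trigger TorchInductor compilation
+# and autotuning: the model and optimizer state_dicts are snapshotted into initial_state
+# and restored afterwards, so the states used for real training are unchanged by warmup.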
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:28:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
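+# ----------------------------------------------------------------------------
+# Illustrative sketch of how the run directory name is built from the CLI
+# arguments (values below assume the argparse defaults: optimizer_mode=0,
+# model_parameterization="whole", adam_lr=1e-3, seed=42):
+#
+#   f"mode_{0}_param_{'whole'}_lr_{0.001}_seed_{42}"
+#   -> "mode_0_param_whole_lr_0.001_seed_42"
+#
+# so each (mode, parameterization, lr, seed) combination gets its own folder
+# under logs_bios/qa_0704/ holding config.json and training_log_<uuid>.txt.
+# ----------------------------------------------------------------------------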
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
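+# ----------------------------------------------------------------------------
+# Illustrative sketch of the attention window schedule implemented by
+# get_window_size_blocks above, assuming the logged num_iterations=10000.
+# The window grows linearly with training progress x = step / num_iterations,
+# rounded up to a multiple of 128 tokens (minimum 128) and returned as a
+# count of 128-token blocks:
+#
+#   x = 0.0  -> window  128 tokens ->  1 block
+#   x = 0.5  -> window  896 tokens ->  7 blocks  (1728 * 0.5 = 864, rounded up)
+#   x = 1.0  -> window 1792 tokens -> 14 blocks  (1728 rounded up to 14 * 128)
+# ----------------------------------------------------------------------------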
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:28:23] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:28:23] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:28:23] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:28:23] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:28:25] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:28:25] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:28:25] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:28:25] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:28:25] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:28:25] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:28:26] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:28:26] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:28:26] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:28:26] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:28:26] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:28:26] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:28:26] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:28:26] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:28:26] [Rank 0] PRINT: Model returns: +[2025-07-05 08:28:26] [Rank 0] PRINT: Model returns: +[2025-07-05 08:28:26] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:28:26] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:28:26] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:28:26] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:28:26] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:28:26] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:28:26] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:28:26] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:28:26] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:28:26] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:28:26] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:28:26] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:28:26] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:28:26] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:28:26] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:28:26] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:29:28] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:29:28] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:29:28] [Rank 0] PRINT: Starting training... +[2025-07-05 08:29:28] [Rank 0] PRINT: Starting training... +[2025-07-05 08:29:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:29:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
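--- Editor's sketch (not a log line): the divisibility warning above follows from the integer division in the validation section of the logged script. This assumes world_size = 4, as implied by the logged val_batch_size (262144 = 4 * 65536); the name `skipped` is introduced here only for illustration.

val_tokens = 1966080                       # from Hyperparameters
val_seq_len = 65536                        # 4 * 16 * 1024, from Hyperparameters
world_size = 4                             # assumption inferred from the logged batch size
val_batch_size = world_size * val_seq_len  # 262144, matches the warning
val_num_steps = val_tokens // val_batch_size            # 7 full validation steps
skipped = val_tokens - val_num_steps * val_batch_size   # 131072 tokens never evaluated
print(val_num_steps, skipped)              # -> 7 131072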
+[2025-07-05 08:29:37] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:29:37] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:29:38] [Rank 0] step:21/10000 train_time:818ms step_avg:38.94ms +[2025-07-05 08:29:38] [Rank 0] step:21/10000 train_time:818ms step_avg:38.94ms +[2025-07-05 08:29:39] [Rank 0] step:41/10000 train_time:2133ms step_avg:52.03ms +[2025-07-05 08:29:39] [Rank 0] step:41/10000 train_time:2133ms step_avg:52.03ms +[2025-07-05 08:29:41] [Rank 0] step:61/10000 train_time:3450ms step_avg:56.55ms +[2025-07-05 08:29:41] [Rank 0] step:61/10000 train_time:3450ms step_avg:56.55ms +[2025-07-05 08:29:42] [Rank 0] step:81/10000 train_time:4771ms step_avg:58.90ms +[2025-07-05 08:29:42] [Rank 0] step:81/10000 train_time:4771ms step_avg:58.90ms +[2025-07-05 08:29:43] [Rank 0] step:101/10000 train_time:6093ms step_avg:60.33ms +[2025-07-05 08:29:43] [Rank 0] step:101/10000 train_time:6093ms step_avg:60.33ms +[2025-07-05 08:29:45] [Rank 0] step:121/10000 train_time:7418ms step_avg:61.30ms +[2025-07-05 08:29:45] [Rank 0] step:121/10000 train_time:7418ms step_avg:61.30ms +[2025-07-05 08:29:46] [Rank 0] step:141/10000 train_time:8744ms step_avg:62.01ms +[2025-07-05 08:29:46] [Rank 0] step:141/10000 train_time:8744ms step_avg:62.01ms +[2025-07-05 08:29:47] [Rank 0] step:161/10000 train_time:10071ms step_avg:62.56ms +[2025-07-05 08:29:47] [Rank 0] step:161/10000 train_time:10071ms step_avg:62.56ms +[2025-07-05 08:29:49] [Rank 0] step:181/10000 train_time:12069ms step_avg:66.68ms +[2025-07-05 08:29:49] [Rank 0] step:181/10000 train_time:12069ms step_avg:66.68ms +[2025-07-05 08:29:50] [Rank 0] step:201/10000 train_time:12785ms step_avg:63.61ms +[2025-07-05 08:29:50] [Rank 0] step:201/10000 train_time:12785ms step_avg:63.61ms +[2025-07-05 08:29:51] [Rank 0] step:221/10000 train_time:14114ms step_avg:63.86ms +[2025-07-05 08:29:51] [Rank 0] step:221/10000 train_time:14114ms step_avg:63.86ms +[2025-07-05 08:29:53] [Rank 0] step:241/10000 train_time:15445ms step_avg:64.09ms +[2025-07-05 08:29:53] [Rank 0] step:241/10000 train_time:15445ms step_avg:64.09ms +[2025-07-05 08:29:54] [Rank 0] step:261/10000 train_time:16776ms step_avg:64.28ms +[2025-07-05 08:29:54] [Rank 0] step:261/10000 train_time:16776ms step_avg:64.28ms +[2025-07-05 08:29:55] [Rank 0] step:281/10000 train_time:18107ms step_avg:64.44ms +[2025-07-05 08:29:55] [Rank 0] step:281/10000 train_time:18107ms step_avg:64.44ms +[2025-07-05 08:29:57] [Rank 0] step:301/10000 train_time:19440ms step_avg:64.58ms +[2025-07-05 08:29:57] [Rank 0] step:301/10000 train_time:19440ms step_avg:64.58ms +[2025-07-05 08:29:58] [Rank 0] step:321/10000 train_time:20772ms step_avg:64.71ms +[2025-07-05 08:29:58] [Rank 0] step:321/10000 train_time:20772ms step_avg:64.71ms +[2025-07-05 08:29:59] [Rank 0] step:341/10000 train_time:22104ms step_avg:64.82ms +[2025-07-05 08:29:59] [Rank 0] step:341/10000 train_time:22104ms step_avg:64.82ms +[2025-07-05 08:30:01] [Rank 0] step:361/10000 train_time:23796ms step_avg:65.92ms +[2025-07-05 08:30:01] [Rank 0] step:361/10000 train_time:23796ms step_avg:65.92ms +[2025-07-05 08:30:02] [Rank 0] step:381/10000 train_time:24771ms step_avg:65.01ms +[2025-07-05 08:30:02] [Rank 0] step:381/10000 train_time:24771ms step_avg:65.01ms +[2025-07-05 08:30:03] [Rank 0] step:401/10000 train_time:26105ms step_avg:65.10ms +[2025-07-05 08:30:03] [Rank 0] step:401/10000 train_time:26105ms step_avg:65.10ms +[2025-07-05 08:30:05] [Rank 0] step:421/10000 train_time:27438ms step_avg:65.17ms 
+[2025-07-05 08:30:05] [Rank 0] step:421/10000 train_time:27438ms step_avg:65.17ms +[2025-07-05 08:30:06] [Rank 0] step:441/10000 train_time:28771ms step_avg:65.24ms +[2025-07-05 08:30:06] [Rank 0] step:441/10000 train_time:28771ms step_avg:65.24ms +[2025-07-05 08:30:07] [Rank 0] step:461/10000 train_time:30104ms step_avg:65.30ms +[2025-07-05 08:30:07] [Rank 0] step:461/10000 train_time:30104ms step_avg:65.30ms +[2025-07-05 08:30:09] [Rank 0] step:481/10000 train_time:31435ms step_avg:65.35ms +[2025-07-05 08:30:09] [Rank 0] step:481/10000 train_time:31435ms step_avg:65.35ms +[2025-07-05 08:30:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:30:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:30:11] [Rank 0] PRINT: step:500/10000 train_loss:3.6111 val_loss:1.9114 train_time:33374ms step_avg:66.75ms +[2025-07-05 08:30:11] [Rank 0] PRINT: step:500/10000 train_loss:3.6111 val_loss:1.9114 train_time:33374ms step_avg:66.75ms +[2025-07-05 08:30:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:30:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ca149dd62825d8eb91acf376adb753cc22e48ca8 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "76959754-29f0-40f1-910e-a5e07fd200fa", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/training_log_76959754-29f0-40f1-910e-a5e07fd200fa.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/training_log_76959754-29f0-40f1-910e-a5e07fd200fa.txt new file mode 100644 index 0000000000000000000000000000000000000000..6352e484768dfab1c890a6c5caaba14fb22a64fd --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/training_log_76959754-29f0-40f1-910e-a5e07fd200fa.txt @@ -0,0 +1,2662 @@ +[2025-07-05 08:55:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:55:45 2025 --- +[2025-07-05 08:55:45] [Rank 0] PRINT: --- Script Start: Sat Jul 5 08:55:45 2025 --- +[2025-07-05 08:55:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:55:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 08:55:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:55:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 08:55:45] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:55:45] 
[Rank 0] PRINT: Using fixed seed: 48 +[2025-07-05 08:55:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48 +[2025-07-05 08:55:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48 +[2025-07-05 08:55:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
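# (Editor's illustration, not part of the logged script.) With num_iterations = 10000 and
# cooldown_frac = 0.8, get_lr(step) defined above returns 1.0 for the first 2000 steps and
# then decays linearly toward 0.1; the training loop below rescales every param group as
# group["lr"] = group["initial_lr"] * get_lr(step). Sample values under those settings:
#   step = 1000  -> x = 0.10 -> 1.00
#   step = 2000  -> x = 0.20 -> 1.00   (cooldown begins here)
#   step = 6000  -> x = 0.60 -> w = 0.5 -> 0.55
#   step = 10000 -> x = 1.00 -> 0.10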
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:55:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 08:55:45] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 08:55:45] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 08:55:45] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:55:45] [Rank 0] PRINT: Constructing model... +[2025-07-05 08:55:47] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:55:47] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 08:55:47] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:55:47] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 08:55:47] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:55:47] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 08:55:48] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:55:48] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 08:55:48] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:55:48] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 08:55:48] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:55:48] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 08:55:48] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:55:48] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 08:55:48] [Rank 0] PRINT: Model returns: +[2025-07-05 08:55:48] [Rank 0] PRINT: Model returns: +[2025-07-05 08:55:48] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:55:48] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 08:55:48] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:55:48] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 08:55:48] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:55:48] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 08:55:48] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:55:48] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 08:55:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:55:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 08:55:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:55:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 08:55:48] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:55:48] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 08:55:48] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:55:48] [Rank 0] PRINT: Starting warmup... +[2025-07-05 08:56:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:56:53] [Rank 0] PRINT: Warmup complete. +[2025-07-05 08:56:54] [Rank 0] PRINT: Starting training... +[2025-07-05 08:56:54] [Rank 0] PRINT: Starting training... +[2025-07-05 08:56:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:56:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
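A short sketch of the arithmetic behind the warning above; the numbers come from the log itself, and world_size is inferred from the logged val_batch_size rather than stated explicitly:

    val_tokens     = 1966080
    val_seq_len    = 65536
    world_size     = 262144 // val_seq_len              # 4, inferred from the logged val_batch_size
    val_batch_size = world_size * val_seq_len           # 262144
    val_num_steps  = val_tokens // val_batch_size       # 7 full validation batches per eval
    leftover       = val_tokens - val_num_steps * val_batch_size   # 131072 tokens

so each validation pass runs 7 full batches and skips the trailing half-batch of 131072 tokens.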
+[2025-07-05 08:57:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:57:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 08:57:02] [Rank 0] step:21/10000 train_time:812ms step_avg:38.66ms +[2025-07-05 08:57:02] [Rank 0] step:21/10000 train_time:812ms step_avg:38.66ms +[2025-07-05 08:57:04] [Rank 0] step:41/10000 train_time:2129ms step_avg:51.94ms +[2025-07-05 08:57:04] [Rank 0] step:41/10000 train_time:2129ms step_avg:51.94ms +[2025-07-05 08:57:05] [Rank 0] step:61/10000 train_time:3450ms step_avg:56.55ms +[2025-07-05 08:57:05] [Rank 0] step:61/10000 train_time:3450ms step_avg:56.55ms +[2025-07-05 08:57:06] [Rank 0] step:81/10000 train_time:4772ms step_avg:58.92ms +[2025-07-05 08:57:06] [Rank 0] step:81/10000 train_time:4772ms step_avg:58.92ms +[2025-07-05 08:57:08] [Rank 0] step:101/10000 train_time:6101ms step_avg:60.40ms +[2025-07-05 08:57:08] [Rank 0] step:101/10000 train_time:6101ms step_avg:60.40ms +[2025-07-05 08:57:09] [Rank 0] step:121/10000 train_time:7431ms step_avg:61.41ms +[2025-07-05 08:57:09] [Rank 0] step:121/10000 train_time:7431ms step_avg:61.41ms +[2025-07-05 08:57:10] [Rank 0] step:141/10000 train_time:8760ms step_avg:62.13ms +[2025-07-05 08:57:10] [Rank 0] step:141/10000 train_time:8760ms step_avg:62.13ms +[2025-07-05 08:57:12] [Rank 0] step:161/10000 train_time:10092ms step_avg:62.68ms +[2025-07-05 08:57:12] [Rank 0] step:161/10000 train_time:10092ms step_avg:62.68ms +[2025-07-05 08:57:13] [Rank 0] step:181/10000 train_time:12079ms step_avg:66.74ms +[2025-07-05 08:57:13] [Rank 0] step:181/10000 train_time:12079ms step_avg:66.74ms +[2025-07-05 08:57:14] [Rank 0] step:201/10000 train_time:12798ms step_avg:63.67ms +[2025-07-05 08:57:14] [Rank 0] step:201/10000 train_time:12798ms step_avg:63.67ms +[2025-07-05 08:57:16] [Rank 0] step:221/10000 train_time:14131ms step_avg:63.94ms +[2025-07-05 08:57:16] [Rank 0] step:221/10000 train_time:14131ms step_avg:63.94ms +[2025-07-05 08:57:17] [Rank 0] step:241/10000 train_time:15466ms step_avg:64.18ms +[2025-07-05 08:57:17] [Rank 0] step:241/10000 train_time:15466ms step_avg:64.18ms +[2025-07-05 08:57:18] [Rank 0] step:261/10000 train_time:16801ms step_avg:64.37ms +[2025-07-05 08:57:18] [Rank 0] step:261/10000 train_time:16801ms step_avg:64.37ms +[2025-07-05 08:57:20] [Rank 0] step:281/10000 train_time:18136ms step_avg:64.54ms +[2025-07-05 08:57:20] [Rank 0] step:281/10000 train_time:18136ms step_avg:64.54ms +[2025-07-05 08:57:21] [Rank 0] step:301/10000 train_time:19473ms step_avg:64.69ms +[2025-07-05 08:57:21] [Rank 0] step:301/10000 train_time:19473ms step_avg:64.69ms +[2025-07-05 08:57:22] [Rank 0] step:321/10000 train_time:20808ms step_avg:64.82ms +[2025-07-05 08:57:22] [Rank 0] step:321/10000 train_time:20808ms step_avg:64.82ms +[2025-07-05 08:57:24] [Rank 0] step:341/10000 train_time:22143ms step_avg:64.93ms +[2025-07-05 08:57:24] [Rank 0] step:341/10000 train_time:22143ms step_avg:64.93ms +[2025-07-05 08:57:25] [Rank 0] step:361/10000 train_time:23732ms step_avg:65.74ms +[2025-07-05 08:57:25] [Rank 0] step:361/10000 train_time:23732ms step_avg:65.74ms +[2025-07-05 08:57:27] [Rank 0] step:381/10000 train_time:24864ms step_avg:65.26ms +[2025-07-05 08:57:27] [Rank 0] step:381/10000 train_time:24864ms step_avg:65.26ms +[2025-07-05 08:57:28] [Rank 0] step:401/10000 train_time:26201ms step_avg:65.34ms +[2025-07-05 08:57:28] [Rank 0] step:401/10000 train_time:26201ms step_avg:65.34ms +[2025-07-05 08:57:29] [Rank 0] step:421/10000 train_time:27536ms step_avg:65.41ms 
+[2025-07-05 08:57:29] [Rank 0] step:421/10000 train_time:27536ms step_avg:65.41ms +[2025-07-05 08:57:31] [Rank 0] step:441/10000 train_time:28872ms step_avg:65.47ms +[2025-07-05 08:57:31] [Rank 0] step:441/10000 train_time:28872ms step_avg:65.47ms +[2025-07-05 08:57:32] [Rank 0] step:461/10000 train_time:30209ms step_avg:65.53ms +[2025-07-05 08:57:32] [Rank 0] step:461/10000 train_time:30209ms step_avg:65.53ms +[2025-07-05 08:57:33] [Rank 0] step:481/10000 train_time:31547ms step_avg:65.59ms +[2025-07-05 08:57:33] [Rank 0] step:481/10000 train_time:31547ms step_avg:65.59ms +[2025-07-05 08:57:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:57:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 08:57:35] [Rank 0] PRINT: step:500/10000 train_loss:3.4149 val_loss:1.9642 train_time:33493ms step_avg:66.99ms +[2025-07-05 08:57:35] [Rank 0] PRINT: step:500/10000 train_loss:3.4149 val_loss:1.9642 train_time:33493ms step_avg:66.99ms +[2025-07-05 08:57:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 08:57:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e5390d02a701f2ed11e72bb04b79a71a0a6ffd84 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "51c6740f-6241-420b-91fe-6a9b7ba5bc73", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49/training_log_51c6740f-6241-420b-91fe-6a9b7ba5bc73.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49/training_log_51c6740f-6241-420b-91fe-6a9b7ba5bc73.txt new file mode 100644 index 0000000000000000000000000000000000000000..4762b8f1b5c48b589aa72c98ee505d941cedb557 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49/training_log_51c6740f-6241-420b-91fe-6a9b7ba5bc73.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:23:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:23:18 2025 --- +[2025-07-05 09:23:18] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:23:18 2025 --- +[2025-07-05 09:23:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:23:18] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:23:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:23:18] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:23:18] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:23:18] 
[Rank 0] PRINT: Using fixed seed: 49 +[2025-07-05 09:23:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49 +[2025-07-05 09:23:18] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_49 +[2025-07-05 09:23:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
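# Note (annotation for the reader; not part of the original logged script): with this run's
# hyperparameters (num_iterations = 10000, cooldown_frac = 0.8), the multiplier returned by
# get_lr(step) defined above works out to:
#   steps 0..1999  -> x < 0.2, multiplier = 1.0 (each group runs at its full initial_lr)
#   step  6000     -> x = 0.6, w = (1 - 0.6) / 0.8 = 0.5, multiplier = 0.5 * 1.0 + 0.5 * 0.1 = 0.55
#   step 10000     -> x = 1.0, w = 0.0, multiplier = 0.1 (10% of initial_lr at the end of cooldown)
# The training loop then sets group["lr"] = group["initial_lr"] * get_lr(step) on every step.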
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:23:18] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
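+# Run-directory naming: the folder name encodes the CLI configuration as
+# mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed}, matching the
+# log paths in this diff (e.g. mode_0_param_qkvo_lr_0.0001_seed_42, mode_5_param_qkvo_lr_0.005_seed_50).
+# A possible launch for such a run (a sketch only: torchrun and a single 4-process node are
+# assumptions, and the script filename is not recorded in these logs; the 262144-token val
+# batches reported below, with val_seq_len 65536, suggest world_size = 4):
+#   torchrun --standalone --nproc_per_node=4 <script>.py \
+#       --optimizer_mode 5 --model_parameterization qkvo --adam_lr 0.005 --seed 50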
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
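+# Shape of the loop below: every val_loss_every (500) steps, plus step 0 and the final step,
+# it runs validation over ~val_tokens, a detailed per-class loss/FTA evaluation on rank 0,
+# refreshes the curve plots, optionally saves a checkpoint (disabled in these runs, since
+# save_checkpoint=False), and resets the running train-loss accumulators; on every training
+# step it does one forward/backward over world_size * train_seq_len tokens, all-reduces
+# gradients (ReduceOp.AVG), rescales each optimizer group's lr by get_lr(step), and, when a
+# Muon optimizer is active, warms its momentum from 0.85 to 0.95 over the first 300 steps.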
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:23:18] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:23:18] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-05 09:23:18] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:23:18] [Rank 0] PRINT: Constructing model... +[2025-07-05 09:23:20] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:23:20] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-05 09:23:20] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:23:20] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-05 09:23:20] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:23:20] [Rank 0] PRINT: Testing model forward function: +[2025-07-05 09:23:21] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:23:21] [Rank 0] PRINT: Model test - Result type: +[2025-07-05 09:23:21] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:23:21] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-05 09:23:21] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:23:21] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-05 09:23:21] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:23:21] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-05 09:23:21] [Rank 0] PRINT: Model returns: +[2025-07-05 09:23:21] [Rank 0] PRINT: Model returns: +[2025-07-05 09:23:21] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:23:21] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-05 09:23:21] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:23:21] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-05 09:23:21] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 09:23:21] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-05 09:23:21] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:23:21] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-05 09:23:21] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:23:21] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-05 09:23:21] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:23:21] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-05 09:23:21] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:23:21] [Rank 0] PRINT: Model compilation complete. +[2025-07-05 09:23:21] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:23:21] [Rank 0] PRINT: Starting warmup... +[2025-07-05 09:24:25] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:24:25] [Rank 0] PRINT: Warmup complete. +[2025-07-05 09:24:25] [Rank 0] PRINT: Starting training... +[2025-07-05 09:24:25] [Rank 0] PRINT: Starting training... +[2025-07-05 09:24:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:24:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:24:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:24:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:24:34] [Rank 0] step:21/10000 train_time:1019ms step_avg:48.51ms +[2025-07-05 09:24:34] [Rank 0] step:21/10000 train_time:1019ms step_avg:48.51ms +[2025-07-05 09:24:35] [Rank 0] step:41/10000 train_time:2330ms step_avg:56.83ms +[2025-07-05 09:24:35] [Rank 0] step:41/10000 train_time:2330ms step_avg:56.83ms +[2025-07-05 09:24:37] [Rank 0] step:61/10000 train_time:3643ms step_avg:59.72ms +[2025-07-05 09:24:37] [Rank 0] step:61/10000 train_time:3643ms step_avg:59.72ms +[2025-07-05 09:24:38] [Rank 0] step:81/10000 train_time:4960ms step_avg:61.23ms +[2025-07-05 09:24:38] [Rank 0] step:81/10000 train_time:4960ms step_avg:61.23ms +[2025-07-05 09:24:39] [Rank 0] step:101/10000 train_time:6282ms step_avg:62.20ms +[2025-07-05 09:24:39] [Rank 0] step:101/10000 train_time:6282ms step_avg:62.20ms +[2025-07-05 09:24:41] [Rank 0] step:121/10000 train_time:7607ms step_avg:62.87ms +[2025-07-05 09:24:41] [Rank 0] step:121/10000 train_time:7607ms step_avg:62.87ms +[2025-07-05 09:24:42] [Rank 0] step:141/10000 train_time:8934ms step_avg:63.36ms +[2025-07-05 09:24:42] [Rank 0] step:141/10000 train_time:8934ms step_avg:63.36ms +[2025-07-05 09:24:43] [Rank 0] step:161/10000 train_time:10263ms step_avg:63.75ms +[2025-07-05 09:24:43] [Rank 0] step:161/10000 train_time:10263ms step_avg:63.75ms +[2025-07-05 09:24:45] [Rank 0] step:181/10000 train_time:11641ms step_avg:64.32ms +[2025-07-05 09:24:45] [Rank 0] step:181/10000 train_time:11641ms step_avg:64.32ms +[2025-07-05 09:24:46] [Rank 0] step:201/10000 train_time:12997ms step_avg:64.66ms +[2025-07-05 09:24:46] [Rank 0] step:201/10000 train_time:12997ms step_avg:64.66ms +[2025-07-05 09:24:47] [Rank 0] step:221/10000 train_time:14329ms step_avg:64.84ms +[2025-07-05 09:24:47] [Rank 0] step:221/10000 train_time:14329ms step_avg:64.84ms +[2025-07-05 09:24:49] [Rank 0] step:241/10000 train_time:15660ms step_avg:64.98ms +[2025-07-05 09:24:49] [Rank 0] step:241/10000 train_time:15660ms step_avg:64.98ms +[2025-07-05 09:24:50] [Rank 0] step:261/10000 train_time:16994ms step_avg:65.11ms +[2025-07-05 09:24:50] [Rank 0] step:261/10000 train_time:16994ms step_avg:65.11ms +[2025-07-05 09:24:51] [Rank 0] step:281/10000 train_time:18326ms step_avg:65.22ms +[2025-07-05 09:24:51] [Rank 0] step:281/10000 train_time:18326ms step_avg:65.22ms +[2025-07-05 09:24:53] [Rank 0] step:301/10000 train_time:19658ms step_avg:65.31ms +[2025-07-05 09:24:53] [Rank 0] step:301/10000 train_time:19658ms step_avg:65.31ms +[2025-07-05 09:24:54] [Rank 0] step:321/10000 train_time:20994ms step_avg:65.40ms +[2025-07-05 09:24:54] [Rank 0] step:321/10000 train_time:20994ms step_avg:65.40ms +[2025-07-05 09:24:55] [Rank 0] step:341/10000 train_time:22329ms step_avg:65.48ms +[2025-07-05 09:24:55] [Rank 0] step:341/10000 train_time:22329ms step_avg:65.48ms +[2025-07-05 09:24:57] [Rank 0] step:361/10000 train_time:23665ms step_avg:65.55ms +[2025-07-05 09:24:57] [Rank 0] step:361/10000 train_time:23665ms step_avg:65.55ms +[2025-07-05 09:24:58] [Rank 0] step:381/10000 train_time:25066ms step_avg:65.79ms +[2025-07-05 09:24:58] [Rank 0] step:381/10000 train_time:25066ms step_avg:65.79ms +[2025-07-05 09:24:59] [Rank 0] step:401/10000 train_time:26398ms step_avg:65.83ms +[2025-07-05 09:24:59] [Rank 0] step:401/10000 train_time:26398ms step_avg:65.83ms +[2025-07-05 09:25:01] [Rank 0] step:421/10000 train_time:27733ms step_avg:65.87ms 
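In the step lines above, step_avg is just the cumulative training wall-clock divided by the step count printed on the same line; a quick check against two of the logged values:

# step_avg = train_time / printed step count (the loop prints step+1)
print(1019 / 21)     # ~48.5 ms  -> matches "step:21/10000 ... step_avg:48.51ms"
print(27733 / 421)   # ~65.9 ms  -> matches "step:421/10000 ... step_avg:65.87ms"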
+[2025-07-05 09:25:01] [Rank 0] step:421/10000 train_time:27733ms step_avg:65.87ms +[2025-07-05 09:25:02] [Rank 0] step:441/10000 train_time:29066ms step_avg:65.91ms +[2025-07-05 09:25:02] [Rank 0] step:441/10000 train_time:29066ms step_avg:65.91ms +[2025-07-05 09:25:03] [Rank 0] step:461/10000 train_time:30402ms step_avg:65.95ms +[2025-07-05 09:25:03] [Rank 0] step:461/10000 train_time:30402ms step_avg:65.95ms +[2025-07-05 09:25:05] [Rank 0] step:481/10000 train_time:31736ms step_avg:65.98ms +[2025-07-05 09:25:05] [Rank 0] step:481/10000 train_time:31736ms step_avg:65.98ms +[2025-07-05 09:25:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:25:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:25:07] [Rank 0] PRINT: step:500/10000 train_loss:3.5864 val_loss:1.9387 train_time:33678ms step_avg:67.36ms +[2025-07-05 09:25:07] [Rank 0] PRINT: step:500/10000 train_loss:3.5864 val_loss:1.9387 train_time:33678ms step_avg:67.36ms +[2025-07-05 09:25:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:25:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2f8e6daa4b7256235224117db2a9fbb32d9f7e5f --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 50, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "fadfc077-1407-4356-9213-e428b838935b", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50/training_log_fadfc077-1407-4356-9213-e428b838935b.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50/training_log_fadfc077-1407-4356-9213-e428b838935b.txt new file mode 100644 index 0000000000000000000000000000000000000000..669df36dfac82a381c3390d58ad7af9617dbf7e1 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50/training_log_fadfc077-1407-4356-9213-e428b838935b.txt @@ -0,0 +1,2662 @@ +[2025-07-05 09:50:41] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:50:41 2025 --- +[2025-07-05 09:50:41] [Rank 0] PRINT: --- Script Start: Sat Jul 5 09:50:41 2025 --- +[2025-07-05 09:50:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:50:41] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=50, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 09:50:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:50:41] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 09:50:41] [Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:50:41] 
[Rank 0] PRINT: Using fixed seed: 50 +[2025-07-05 09:50:41] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50 +[2025-07-05 09:50:41] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_50 +[2025-07-05 09:50:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:50:41] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 09:50:42] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 09:50:42] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-05 09:50:42] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:50:42] [Rank 0] PRINT: Constructing model...
+[2025-07-05 09:50:44] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:50:44] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 09:50:44] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:50:44] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 09:50:44] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:50:44] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 09:50:45] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:50:45] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 09:50:45] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:50:45] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-05 09:50:45] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:50:45] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 09:50:45] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 09:50:45] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 09:50:45] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-07-05 09:50:45] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-07-05 09:50:45] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 09:50:45] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 09:50:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 09:50:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 09:50:45] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:50:45] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:50:45] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 09:50:45] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:50:45] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 09:51:48] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:51:48] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 09:51:48] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:51:48] [Rank 0] PRINT: Starting training...
+[2025-07-05 09:51:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 09:51:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-05 09:51:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:51:56] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-05 09:51:58] [Rank 0] step:21/10000 train_time:1015ms step_avg:48.34ms +[2025-07-05 09:51:58] [Rank 0] step:21/10000 train_time:1015ms step_avg:48.34ms +[2025-07-05 09:51:59] [Rank 0] step:41/10000 train_time:2328ms step_avg:56.79ms +[2025-07-05 09:51:59] [Rank 0] step:41/10000 train_time:2328ms step_avg:56.79ms +[2025-07-05 09:52:00] [Rank 0] step:61/10000 train_time:3642ms step_avg:59.71ms +[2025-07-05 09:52:00] [Rank 0] step:61/10000 train_time:3642ms step_avg:59.71ms +[2025-07-05 09:52:02] [Rank 0] step:81/10000 train_time:4959ms step_avg:61.22ms +[2025-07-05 09:52:02] [Rank 0] step:81/10000 train_time:4959ms step_avg:61.22ms +[2025-07-05 09:52:03] [Rank 0] step:101/10000 train_time:6278ms step_avg:62.16ms +[2025-07-05 09:52:03] [Rank 0] step:101/10000 train_time:6278ms step_avg:62.16ms +[2025-07-05 09:52:04] [Rank 0] step:121/10000 train_time:7599ms step_avg:62.80ms +[2025-07-05 09:52:04] [Rank 0] step:121/10000 train_time:7599ms step_avg:62.80ms +[2025-07-05 09:52:06] [Rank 0] step:141/10000 train_time:8923ms step_avg:63.29ms +[2025-07-05 09:52:06] [Rank 0] step:141/10000 train_time:8923ms step_avg:63.29ms +[2025-07-05 09:52:07] [Rank 0] step:161/10000 train_time:10247ms step_avg:63.65ms +[2025-07-05 09:52:07] [Rank 0] step:161/10000 train_time:10247ms step_avg:63.65ms +[2025-07-05 09:52:08] [Rank 0] step:181/10000 train_time:12257ms step_avg:67.72ms +[2025-07-05 09:52:08] [Rank 0] step:181/10000 train_time:12257ms step_avg:67.72ms +[2025-07-05 09:52:10] [Rank 0] step:201/10000 train_time:12973ms step_avg:64.54ms +[2025-07-05 09:52:10] [Rank 0] step:201/10000 train_time:12973ms step_avg:64.54ms +[2025-07-05 09:52:11] [Rank 0] step:221/10000 train_time:14304ms step_avg:64.73ms +[2025-07-05 09:52:11] [Rank 0] step:221/10000 train_time:14304ms step_avg:64.73ms +[2025-07-05 09:52:12] [Rank 0] step:241/10000 train_time:15634ms step_avg:64.87ms +[2025-07-05 09:52:12] [Rank 0] step:241/10000 train_time:15634ms step_avg:64.87ms +[2025-07-05 09:52:14] [Rank 0] step:261/10000 train_time:16965ms step_avg:65.00ms +[2025-07-05 09:52:14] [Rank 0] step:261/10000 train_time:16965ms step_avg:65.00ms +[2025-07-05 09:52:15] [Rank 0] step:281/10000 train_time:18295ms step_avg:65.11ms +[2025-07-05 09:52:15] [Rank 0] step:281/10000 train_time:18295ms step_avg:65.11ms +[2025-07-05 09:52:16] [Rank 0] step:301/10000 train_time:19625ms step_avg:65.20ms +[2025-07-05 09:52:16] [Rank 0] step:301/10000 train_time:19625ms step_avg:65.20ms +[2025-07-05 09:52:18] [Rank 0] step:321/10000 train_time:20958ms step_avg:65.29ms +[2025-07-05 09:52:18] [Rank 0] step:321/10000 train_time:20958ms step_avg:65.29ms +[2025-07-05 09:52:19] [Rank 0] step:341/10000 train_time:22290ms step_avg:65.37ms +[2025-07-05 09:52:19] [Rank 0] step:341/10000 train_time:22290ms step_avg:65.37ms +[2025-07-05 09:52:20] [Rank 0] step:361/10000 train_time:24290ms step_avg:67.29ms +[2025-07-05 09:52:20] [Rank 0] step:361/10000 train_time:24290ms step_avg:67.29ms +[2025-07-05 09:52:22] [Rank 0] step:381/10000 train_time:25009ms step_avg:65.64ms +[2025-07-05 09:52:22] [Rank 0] step:381/10000 train_time:25009ms step_avg:65.64ms +[2025-07-05 09:52:23] [Rank 0] step:401/10000 train_time:26343ms step_avg:65.69ms +[2025-07-05 09:52:23] [Rank 0] step:401/10000 train_time:26343ms step_avg:65.69ms +[2025-07-05 09:52:24] [Rank 0] step:421/10000 train_time:27676ms step_avg:65.74ms 
+[2025-07-05 09:52:24] [Rank 0] step:421/10000 train_time:27676ms step_avg:65.74ms +[2025-07-05 09:52:26] [Rank 0] step:441/10000 train_time:29009ms step_avg:65.78ms +[2025-07-05 09:52:26] [Rank 0] step:441/10000 train_time:29009ms step_avg:65.78ms +[2025-07-05 09:52:27] [Rank 0] step:461/10000 train_time:30342ms step_avg:65.82ms +[2025-07-05 09:52:27] [Rank 0] step:461/10000 train_time:30342ms step_avg:65.82ms +[2025-07-05 09:52:28] [Rank 0] step:481/10000 train_time:31675ms step_avg:65.85ms +[2025-07-05 09:52:28] [Rank 0] step:481/10000 train_time:31675ms step_avg:65.85ms +[2025-07-05 09:52:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:52:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-05 09:52:31] [Rank 0] PRINT: step:500/10000 train_loss:4.2106 val_loss:2.2905 train_time:33613ms step_avg:67.23ms +[2025-07-05 09:52:31] [Rank 0] PRINT: step:500/10000 train_loss:4.2106 val_loss:2.2905 train_time:33613ms step_avg:67.23ms +[2025-07-05 09:52:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-05 09:52:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51/config.json b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51/config.json new file mode 100644 index 0000000000000000000000000000000000000000..035db6b414a08472b599a112de9a32e213188b37 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 51, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "9c568e28-e888-4824-90fa-53793bd70272", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51/training_log_9c568e28-e888-4824-90fa-53793bd70272.txt b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51/training_log_9c568e28-e888-4824-90fa-53793bd70272.txt new file mode 100644 index 0000000000000000000000000000000000000000..d6091936cb82bf1ab973717bdb974fd8fc51ad49 --- /dev/null +++ b/logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51/training_log_9c568e28-e888-4824-90fa-53793bd70272.txt @@ -0,0 +1,2662 @@ +[2025-07-05 10:18:07] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:18:07 2025 --- +[2025-07-05 10:18:07] [Rank 0] PRINT: --- Script Start: Sat Jul 5 10:18:07 2025 --- +[2025-07-05 10:18:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 10:18:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=51, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-05 10:18:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:18:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-05 10:18:07] [Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:18:07] 
[Rank 0] PRINT: Using fixed seed: 51 +[2025-07-05 10:18:07] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51 +[2025-07-05 10:18:07] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_51 +[2025-07-05 10:18:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no 
sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert 
torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:18:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
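+                         # Glossary for the parameter groups named above (it matches the
+                         # parameter-collection logic later in this script): QK = per-block q_w / k_w
+                         # attention matrices; VO = v_w plus the attention output projection
+                         # attn.c_proj.weight; W_1 = mlp.c_fc.weight; W_2 = mlp.c_proj.weight.
+                         # Embeddings, the lm_head and scalar parameters are always given to Adam.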
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
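+# Note on the validation budget set in Hyperparameters above: val_seq_len = 4*16*1024 = 65536
+# tokens per rank, so one distributed validation step consumes world_size * 65536 tokens. The
+# val_batch_size of 262144 reported later in this log implies world_size = 4, and
+# 1966080 / 262144 = 7.5, so only 7 full validation steps run per evaluation and half a batch of
+# validation tokens is skipped, which is why the "not perfectly divisible" warning appears below.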
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
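+            # Hypothetical example (for illustration only, not taken from the dataset): for a
+            # cleaned item like "Where was this person born? Paris, France", the prompt becomes
+            # "Where was this person born?" and the answer "Paris, France"; the answer is then
+            # re-encoded below with a leading space so its first token matches the way it
+            # appears right after the question during training.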
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
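+    Loads the QA jsonl at qa_data_path, optionally stratified-samples roughly num_samples items,
+    maps each item's class_id to its power-law group via generate_powerlaw_selection_counts(m_val),
+    and returns a dict of {group_id (str): mean loss} using the model's own loss on padded sequences.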
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
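+# Illustrative probe of the schedules defined above (a sketch for clarity; it only logs a few
+# values and does not change training). With num_iterations = 10000 and cooldown_frac = 0.8 the
+# LR multiplier stays at 1.0 for the first 2000 steps, then decays linearly to 0.1 at the final
+# step, while the sliding attention window grows from 128 tokens (1 block) to 1792 tokens (14 blocks).
+if master_process:
+    for probe_step in (0, 2000, 6000, args.num_iterations):
+        print0(f"schedule probe step={probe_step} lr_mult={get_lr(probe_step):.3f} "
+               f"window_blocks={int(get_window_size_blocks(probe_step).item())}", console=False)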
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-05 10:18:07] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-05 10:18:07] [Rank 0] PRINT: Constructing model...
+[2025-07-05 10:18:09] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-05 10:18:09] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-05 10:18:09] [Rank 0] PRINT: Testing model forward function:
+[2025-07-05 10:18:10] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-05 10:18:10] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-05 10:18:10] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-05 10:18:10] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-05 10:18:10] [Rank 0] PRINT: Model returns: 
+[2025-07-05 10:18:10] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-05 10:18:11] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-05 10:18:11] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-07-05 10:18:11] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-05 10:18:11] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-05 10:18:11] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-05 10:18:11] [Rank 0] PRINT: Model compilation complete.
+[2025-07-05 10:18:11] [Rank 0] PRINT: Starting warmup...
+[2025-07-05 10:19:14] [Rank 0] PRINT: Warmup complete.
+[2025-07-05 10:19:14] [Rank 0] PRINT: Starting training...
+[2025-07-05 10:19:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 10:19:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-05 10:19:23] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.38ms
+[2025-07-05 10:19:24] [Rank 0] step:41/10000 train_time:2327ms step_avg:56.76ms
+[2025-07-05 10:19:26] [Rank 0] step:61/10000 train_time:3641ms step_avg:59.68ms
+[2025-07-05 10:19:27] [Rank 0] step:81/10000 train_time:4956ms step_avg:61.19ms
+[2025-07-05 10:19:28] [Rank 0] step:101/10000 train_time:6279ms step_avg:62.17ms
+[2025-07-05 10:19:30] [Rank 0] step:121/10000 train_time:7605ms step_avg:62.85ms
+[2025-07-05 10:19:31] [Rank 0] step:141/10000 train_time:8931ms step_avg:63.34ms
+[2025-07-05 10:19:32] [Rank 0] step:161/10000 train_time:10259ms step_avg:63.72ms
+[2025-07-05 10:19:34] [Rank 0] step:181/10000 train_time:11588ms step_avg:64.02ms
+[2025-07-05 10:19:35] [Rank 0] step:201/10000 train_time:12991ms step_avg:64.63ms
+[2025-07-05 10:19:36] [Rank 0] step:221/10000 train_time:14322ms step_avg:64.80ms
+[2025-07-05 10:19:38] [Rank 0] step:241/10000 train_time:15654ms step_avg:64.95ms
+[2025-07-05 10:19:39] [Rank 0] step:261/10000 train_time:16986ms step_avg:65.08ms
+[2025-07-05 10:19:40] [Rank 0] step:281/10000 train_time:18318ms step_avg:65.19ms
+[2025-07-05 10:19:42] [Rank 0] step:301/10000 train_time:19650ms step_avg:65.28ms
+[2025-07-05 10:19:43] [Rank 0] step:321/10000 train_time:20982ms step_avg:65.36ms
+[2025-07-05 10:19:44] [Rank 0] step:341/10000 train_time:22315ms step_avg:65.44ms
+[2025-07-05 10:19:46] [Rank 0] step:361/10000 train_time:23903ms step_avg:66.21ms
+[2025-07-05 10:19:47] [Rank 0] step:381/10000 train_time:25024ms step_avg:65.68ms
+[2025-07-05 10:19:48] [Rank 0] step:401/10000 train_time:26358ms step_avg:65.73ms
+[2025-07-05 10:19:50] [Rank 0] step:421/10000 train_time:27692ms step_avg:65.78ms
+[2025-07-05 10:19:51] [Rank 0] step:441/10000 train_time:29026ms step_avg:65.82ms
+[2025-07-05 10:19:52] [Rank 0] step:461/10000 train_time:30359ms step_avg:65.85ms
+[2025-07-05 10:19:54] [Rank 0] step:481/10000 train_time:31693ms step_avg:65.89ms
+[2025-07-05 10:19:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-05 10:19:56] [Rank 0] PRINT: step:500/10000 train_loss:3.6233 val_loss:1.9194 train_time:33632ms step_avg:67.26ms
+[2025-07-05 10:19:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---